Example #1
def main(argv):
    """Go Main Go."""
    table = argv[1]
    nt = NetworkTable(["WFO", "RFC", "NWS", "NCEP", "CWSU", "WSO"])
    pgconn = get_dbconn('afos', user='******')
    mpgconn = get_dbconn('mesosite')
    cursor = pgconn.cursor()
    mcursor = mpgconn.cursor()
    df = read_sql("""
        SELECT source, count(*) from """ + table + """
        WHERE source is not null GROUP by source ORDER by source
    """, pgconn, index_col='source')
    for source, row in df.iterrows():
        if source[0] not in ['K', 'P']:
            continue
        if source in UNKNOWN:
            continue
        iemsource = source[1:] if source[0] == 'K' else source
        if iemsource in nt.sts:
            continue
        if source in XREF:
            cursor.execute("""
                UPDATE """ + table + """ SET source = %s WHERE source = %s
            """, (XREF[source], source))
            print(("Correcting %s -> %s, %s rows"
                   ) % (source, XREF[source], cursor.rowcount))
        else:
            if row['count'] < 10:
                print("skipping %s as row count is low" % (source, ))
                continue
            mcursor.execute("""
                WITH centers as (
                    select id, geom::geography from stations where network in
                    ('WFO', 'RFC', 'NWS', 'NCEP', 'CWSU', 'WSO')
                ), asos as (
                    SELECT geom::geography from stations where id = %s
                    and network ~* 'ASOS'
                )
                SELECT c.id as center, st_distance(c.geom, a.geom)
                from centers c, asos a ORDER by st_distance ASC
            """, (iemsource, ))
            if mcursor.rowcount < 5:
                print("Source: %s is double unknown" % (source, ))
                continue
            for i, row2 in enumerate(mcursor):
                print("%s %s %.2f" % (source, row2[0], row2[1]))
                if i > 4:
                    break
            newval = input(
                "What do you want to do with %s (count:%s)? " % (
                    source, row['count']))
            if len(newval) == 4:
                XREF[source] = newval
            else:
                UNKNOWN.append(source)

    print(json.dumps(XREF, indent=4))
    print(UNKNOWN)
    cursor.close()
    pgconn.commit()
Example #2
def main():
    """Go!"""
    nt = NetworkTable("IA_ASOS")
    pgconn = get_dbconn('iem')
    df = read_sql("""
    SELECT id, valid, tmpf::int as tmpf, dwpf::int as dwpf,
    sknt from current_log c JOIN stations t
    on (c.iemid = t.iemid) WHERE t.network = 'IA_ASOS' and
    c.valid > 'TODAY' and c.tmpf > 70 ORDER by id ASC
    """, pgconn, index_col=None)

    pgconn = get_dbconn('asos')
    for _, row in df.iterrows():
        df2 = read_sql("""
            SELECT valid, tmpf, dwpf, sknt from alldata WHERE station = %s
            and valid < '2018-10-03' and tmpf::int >= %s and dwpf::int >= %s
            and sknt >= %s ORDER by valid DESC
        """, pgconn, params=(row['id'], row['tmpf'], row['dwpf'], row['sknt']),
                       index_col=None)
        if len(df2.index) > 5:
            continue
        lastdate = None
        if not df2.empty:
            lastdate = df2.iloc[0]['valid'].date()
        print(
            ("%s,%s,%s,%s,%.0f,%s,%s,%s"
             ) % (row['id'], row['valid'], row['tmpf'], row['dwpf'],
                  row['sknt'], len(df2.index), lastdate,
                  nt.sts[row['id']]['archive_begin'].year)
        )
Example #3
def main(argv):
    """Go Main Go."""
    afostable = argv[1]
    stations = load_stations()
    iem_pgconn = get_dbconn('iem')
    icursor = iem_pgconn.cursor()
    pgconn = get_dbconn('afos')
    cursor = pgconn.cursor()
    cursor.execute("""
        SELECT entered, data from """ + afostable + """
        WHERE substr(pil, 1, 3) = 'DSM'
        ORDER by entered
    """)
    updates = 0
    for row in cursor:
        if row[1].startswith("\001"):
            try:
                dsm = parser(row[1], utcnow=row[0])
                dsm.tzlocalize(stations)
            except Exception as exp:
                print(exp)
                print(row[1])
                continue
            dsm.sql(icursor)
        else:
            try:
                dsm = process(row[1])
                if dsm is None:
                    continue
                dsm.compute_times(row[0])
                dsm.tzlocalize(stations[dsm.station])
            except Exception as exp:
                print(exp)
                print(row[1])
                continue
            # print(row[1])
            dsm.sql(icursor)
            # print("%s %s %s/%s %s\n\n" % (
            #    dsm.station, dsm.date, dsm.groupdict['high'],
            #    dsm.groupdict['low'], dsm.groupdict['pday']))
        updates += 1
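        # Commit in batches of 1000 rows to keep transactions manageable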
        if updates % 1000 == 0:
            icursor.close()
            iem_pgconn.commit()
            icursor = iem_pgconn.cursor()

    icursor.close()
    iem_pgconn.commit()
Example #4
def main():
    """Go """
    pgconn = get_dbconn('coop', user='******')
    df = gpd.read_postgis("""
    WITH data as (
        SELECT station, year, avg((high+low)/2.) as avg_temp,
        sum(precip) as tot_precip, sum(snow) as tot_snow
        from alldata_ia where year >= 1998 and year < 2016
        and station != 'IA0000' and substr(station, 3, 1) != 'C'
        GROUP by station, year
    ), agg as (
        SELECT * from data WHERE tot_snow > 0
    )
    select a.*, t.geom from agg a JOIN stations t on (a.station = t.id)
    WHERE t.network = 'IACLIMATE'
    """, pgconn, geom_col='geom', index_col=None)
    df['cycles'] = 0

    for station in tqdm.tqdm(df['station'].unique()):
        uri = ("http://iem.local/plotting/auto/plot/121/network:IACLIMATE::"
               "station:%s::thres1:30-32::dpi:100.csv") % (station, )
        req = requests.get(uri)
        # requests' .content is bytes; StringIO needs the decoded .text
        ldf = pd.read_csv(StringIO(req.text), index_col='year')
        ldf['total'] = ldf['30-32f'] + ldf['30-32s']
        for year in range(1998, 2016):
            val = ldf.loc[year]['total']
            df.loc[((df['year'] == year) &
                    (df['station'] == station)), 'cycles'] = val

    for year in range(1998, 2016):
        df2 = df[df['year'] == year]
        df2.to_file('iowaclimate_%s.shp' % (year, ))
        shutil.copyfile('/mesonet/data/gis/meta/4326.prj',
                        'iowaclimate_%s.prj' % (year, ))
Example #5
def main():
    """Go Main Go"""
    pgconn = get_dbconn('postgis')
    sql = get_polygon()
    df = read_sql(sql, pgconn, index_col='year')
    print(df)

    (fig, ax) = plt.subplots(1, 1)

    ax.bar(df.index.values - 0.2, df['count'].values, width=0.4, fc='r')
    ax.set_ylabel("Warning Count", color='r')

    y2 = ax.twinx()
    y2.bar(df.index.values + 0.2, df['area'].values, width=0.4, fc='b')
    y2.set_ylabel("Size (Continental United States)", color='b')

    p1 = plt.Rectangle((0, 0), 1, 1, fc="r")
    p3 = plt.Rectangle((0, 0), 1, 1, fc="b")
    ax.legend([p1, p3], ["Counts", "Size"], loc=2)
    ax.grid(True)

    ax.set_title("NWS *Storm Based* Tornado + Severe Thunderstorm Warnings")
    ax.set_ylim(0, 90000)
    y2.set_ylim(0, 25)
    ax.set_xlim(1985.5, 2017.5)

    fig.text(0.01, 0.01, 'Generated %s' % (datetime.date.today(), ))
    fig.savefig('test.png')
Example #6
def get_data():
    """The data we want and the data we need"""
    pgconn = get_dbconn('coop', user='******')
    df = read_sql("""
        select year, week_ending, num_value, state_alpha from nass_quickstats
        where commodity_desc = 'CORN' and statisticcat_desc = 'PROGRESS'
        and unit_desc = 'PCT SILKING' and
        util_practice_desc = 'ALL UTILIZATION PRACTICES'
        and num_value is not null
        ORDER by state_alpha, week_ending
    """, pgconn, index_col=None)
    df['week_ending'] = pd.to_datetime(df['week_ending'])
    data = {}
    for state, gdf in df.groupby('state_alpha'):
        sdf = gdf.copy()
        sdf.set_index('week_ending', inplace=True)
        newdf = sdf.resample('D').interpolate(method='linear')
        y10 = newdf[newdf['year'] > 2007]
        doyavgs = y10.groupby(y10.index.strftime("%m%d")).mean()
        lastdate = pd.Timestamp(newdf.index.values[-1]).to_pydatetime()
        data[state] = {'date': lastdate,
                       'avg': doyavgs.at[lastdate.strftime("%m%d"),
                                         'num_value'],
                       'd2017': newdf.at[lastdate,
                                         'num_value']}
        print("%s %s" % (state, data[state]))
    return data
Example #7
def main():
    """Go Main Go"""
    pgconn = get_dbconn('postgis')
    years = np.arange(1997, 2019)
    days = np.arange(1, 32)
    data = np.ma.zeros((len(years), len(days)))
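    # Sentinel for the last ten days of May 2018, which had not yet been
    # observed at generation time; these cells are masked out below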
    data[-1, -10:] = -1
    cursor = pgconn.cursor()
    cursor.execute("""
    SELECT extract(year from issued) as year,
    extract(day from issued) as day, count(*) from
    watches where type = 'TOR' and extract(month from issued) = 5
    GROUP by year, day
    """)
    for row in cursor:
        data[int(row[0]) - 1997, int(row[1]) - 1] = row[2]
    data.mask = (data < 0)
    print(data)
    ax = plt.axes([0.1, 0.15, 0.9, 0.75])
    cmap = plt.get_cmap('plasma')
    cmap.set_under('white')
    cmap.set_bad('gray')
    res = ax.imshow(np.flipud(data), cmap=cmap, extent=[0.5, 31.5, 1996.5, 2018.5],
                    vmin=1, aspect='auto', zorder=4, alpha=0.8)
    plt.colorbar(res, label='count')
    ax.set_yticks(range(1998, 2019, 2))
    ax.grid(True, zorder=3)
    ax.set_title("1997-2018 May\nStorm Prediction Center Daily Tornado Watch Count")
    ax.set_xlabel("CDT Calendar Day of May")
    plt.gcf().text(0.01, 0.01, '@akrherz, generated 22 May 2018')
    plt.gcf().savefig('test.png')
Example #8
def main():
    """Ingest things from Gio"""
    pgconn = get_dbconn('td')
    cursor = pgconn.cursor()

    df = pd.read_csv(sys.argv[1])
    uniqueid = sys.argv[2]

    cursor.execute("""
        DELETE from watertable_data where uniqueid = %s and
        valid between %s and %s
    """, (uniqueid, df['Date'].min(), df['Date'].max()))
    deleted = cursor.rowcount
    if deleted > 0:
        print("Removed %s" % (deleted,))

    inserts = 0
    for _idx, row in df.iterrows():
        if row['Date'] == ' ' or row['Date'] is None:
            continue
        cursor.execute("""
        INSERT into watertable_data
        (uniqueid, plotid, valid, depth_mm_qc, depth_mm)
        VALUES (%s, %s, %s, %s, %s)
        """, (uniqueid, row['plot'], row['Date'], row.get('WAT4'),
              row.get('WAT4')))
        inserts += 1
    print("Inserted %s, Deleted %s entries for %s" % (inserts, deleted,
                                                      uniqueid))
    cursor.close()
    pgconn.commit()
    pgconn.close()
Example #9
def main():
    """Go Main Go"""
    pgconn = get_dbconn('coop')
    df = read_sql("""
    WITH data as (
        SELECT snowd, lead(high) OVER (ORDER by day ASC) as nextday
        from alldata_ia
        WHERE station = 'IA0200')
    select * from data where snowd >= 0.1 and snowd < 18
    """, pgconn, index_col=None)
    df['cat'] = (df['snowd'] / 2).astype(int)
    print(df)
    (fig, ax) = plt.subplots(1, 1)
    df.boxplot(column='nextday', by='cat', ax=ax)
    ax.set_xticks(range(1, 10))
    ax.set_xticklabels(['Trace-2', '2-4', '4-6', '6-8', '8-10', '10-12',
                        '12-14', '14-16', '16-18'])
    ax.axhline(43, color='r')
    ax.text(9.5, 43, r"43$^\circ$F", color='r')
    ax.set_xlabel("Reported Snow Depth [inch], n=%.0f" % (len(df.index), ))
    ax.set_ylabel(r"Next Day High Temperature [$^\circ$F]")
    plt.suptitle('')
    ax.set_title(("[IA0200] Ames Snow Depth and Next Day High Temperature\n"
                  r"boxes are inner quartile range, whiskers are 1.5 $\sigma$"))
    fig.savefig('test.png')
Example #10
def main():
    """Go"""
    pgconn = get_dbconn("afos")

    acursor = pgconn.cursor()

    payload = getattr(sys.stdin, 'buffer', sys.stdin).read()
    payload = payload.decode('ascii', errors='ignore')
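    # Collapse the product's \r\r\n line endings to a placeholder character
    # so each .A message can be captured with a single regex; the placeholder
    # is swapped back to newlines before the insert below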
    data = payload.replace("\r\r\n", "z")

    tokens = re.findall(r"(\.A [A-Z0-9]{3} .*?=)", data)

    utcnow = datetime.datetime.utcnow()
    gmt = utcnow.replace(tzinfo=pytz.utc)
    gmt = gmt.replace(second=0)

    table = "products_%s_0106" % (gmt.year,)
    if gmt.month > 6:
        table = "products_%s_0712" % (gmt.year,)

    for token in tokens:
        # print(tokens)
        sql = """
        INSERT into """ + table + """
        (pil, data, entered) values(%s,%s,%s)
        """
        sqlargs = ("%s%s" % ('RR7', token[3:6]), token.replace("z", "\n"), gmt)
        acursor.execute(sql, sqlargs)

    acursor.close()
    pgconn.commit()
    pgconn.close()
Example #11
def main():
    """Go Main Go."""
    config = get_properties()
    access_token = '...'
    api = twitter.Api(consumer_key=config['bot.twitter.consumerkey'],
                      consumer_secret=config['bot.twitter.consumersecret'],
                      access_token_key=access_token,
                      access_token_secret='...')
    pgconn = get_dbconn('mesosite')
    cursor = pgconn.cursor()
    cursor2 = pgconn.cursor()
    cursor.execute("""
    SELECT screen_name from iembot_twitter_oauth where user_id is null
    """)
    for row in cursor:
        try:
            user_id = api.UsersLookup(screen_name=row[0])[0].id
        except Exception as _exp:
            print("FAIL %s" % (row[0], ))
            continue
        print("%s -> %s" % (row[0], user_id))
        cursor2.execute("""
        UPDATE iembot_twitter_oauth SET user_id = %s where screen_name = %s
        """, (user_id, row[0]))
    cursor2.close()
    pgconn.commit()
Example #12
def main():
    """Go Main Go."""
    nt = NetworkTable(["WFO", "CWSU"])
    df = read_sql("""
    SELECT screen_name, access_token, access_token_secret
    from iembot_twitter_oauth
    WHERE access_token is not null
    """, get_dbconn('mesosite'), index_col='screen_name')

    wfos = list(nt.sts.keys())
    wfos.sort()
    for wfo in wfos:
        username = "******" % (wfo.lower()[-3:], )
        if username not in df.index:
            print("%s is unknown?" % (username, ))
            continue
        api = twitter.Api(
            consumer_key=PROPS['bot.twitter.consumerkey'],
            consumer_secret=PROPS['bot.twitter.consumersecret'],
            access_token_key=df.at[username, 'access_token'],
            access_token_secret=df.at[username, 'access_token_secret'])

        location = "%s, %s" % (nt.sts[wfo]['name'], nt.sts[wfo]['state'])
        desc = (
            'Syndication of National Weather Service Office %s. '
            'Unmonitored, contact @akrherz who developed this.'
        ) % (location, )
        print("len(desc) = %s" % (len(desc), ))
        profileURL = "https://mesonet.agron.iastate.edu/projects/iembot/"
        twuser = api.UpdateProfile(
            description=desc, profileURL=profileURL,
            location=location)
        # twuser.AsDict()['followers_count']
        print("%s %s" % (username, location))
def main():
    """Go Main Go."""
    pgconn = get_dbconn('postgis')
    base = read_sql("""
    select segid, archive_begin, archive_end from roads_base
    """, pgconn, index_col='segid')
    for year in range(2003, 2018):
        table = "roads_%s_%s_log" % (year, year + 1)
        df = read_sql("""
            SELECT segid, min(valid), max(valid) from """ + table + """
            WHERE valid is not null GROUP by segid
        """, pgconn, index_col='segid')
        for segid, row in df.iterrows():
            curmin = base.at[segid, 'archive_begin']
            curmax = base.at[segid, 'archive_end']
            # read_sql yields NaT (not None) for NULL timestamps
            if pd.isnull(curmin) or row['min'] < curmin:
                base.at[segid, 'archive_begin'] = row['min']
            if pd.isnull(curmax) or row['max'] > curmax:
                base.at[segid, 'archive_end'] = row['max']

    cursor = pgconn.cursor()
    for segid, row in base.iterrows():
        if pd.isnull(row['archive_begin']) or pd.isnull(row['archive_end']):
            continue
        print("%s %s -> %s" % (
            segid, row['archive_begin'], row['archive_end']))
        cursor.execute("""
        update roads_base SET archive_begin = %s, archive_end = %s
        where segid = %s
        """, (row['archive_begin'], row['archive_end'], segid))
    cursor.close()
    pgconn.commit()
Example #14
def main():
    """Go Main Go"""
    output = open('insert.sql', 'w')
    pgconn = get_dbconn('postgis')
    df = read_sql("""
        SELECT st_x(st_centroid(geom)) as lon,
        st_y(st_centroid(geom)) as lat, st_abbrv, name, iemid
        from climdiv ORDER by iemid
    """, pgconn, index_col='iemid')
    for station, row in df.iterrows():
        if row['st_abbrv'] in ['AK', 'HI', 'PR']:
            continue
        name = "%s - %s Climate Division" % (state_names[row['st_abbrv']],
                                             row['name'].title())
        # can't have commas
        name = name.replace(",", " ")
        sql = """
        INSERT into stations(id, name, network, country, state,
        plot_name, online, metasite, geom) VALUES
        ('%s', '%s', '%sCLIMATE',
         'US', '%s', '%s', 't', 't', 'SRID=4326;POINT(%s %s)');
        """ % (station, name, row['st_abbrv'], row['st_abbrv'],
               name, row['lon'], row['lat'])
        output.write(sql)
    output.close()
Example #15
def main():
    """Go Main"""
    pgconn = get_dbconn('postgis')
    df = read_postgis("""
    select geom, issue from sbw where wfo = 'PUB' and phenomena = 'TO'
    and significance = 'W' and status = 'NEW' and issue > '2007-10-01'
    and issue < '2019-01-01'
    """, pgconn, geom_col='geom', crs={'init': 'epsg:4326', 'no_defs': True})

    bounds = df['geom'].total_bounds
    # bounds = [-102.90293903,   40.08745967,  -97.75622311,   43.35172981]
    bbuf = 0.25
    mp = MapPlot(
        sector='custom', west=bounds[0] - bbuf,
        south=bounds[1] - bbuf,
        east=bounds[2] + bbuf, north=bounds[3] + bbuf,
        continentalcolor='white',  # '#b3242c',
        title='NWS Pueblo Issued Tornado Warnings [2008-2018]',
        subtitle='%s warnings plotted' % (len(df.index), ))
    crs_new = ccrs.Mercator()
    crs = ccrs.PlateCarree()
    new_geometries = [crs_new.project_geometry(ii, src_crs=crs)
                      for ii in df['geom'].values]
    # mp.draw_cwas()
    mp.ax.add_geometries(new_geometries, crs=crs_new, lw=0.5,
                         edgecolor='red', facecolor='None', alpha=1,
                         zorder=5)
    mp.drawcounties()
    mp.postprocess(filename='test.png')
Example #16
    def __init__(self, network, cursor=None):
        """A class representing a network(s) of IEM metadata

        Args:
          network (str or list): A network identifier used by the IEM; this
            can be either a string or a list of strings.
          cursor (dbcursor,optional): A database cursor to use for the query
        """
        self.sts = OrderedDict()
        if network is None:
            return

        if cursor is None:
            dbconn = get_dbconn('mesosite', user='******')
            cursor = dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        if isinstance(network, str):
            network = [network, ]
        cursor.execute("""
            WITH myattrs as (
                SELECT a.iemid, array_agg(attr) as attrs,
                array_agg(value) as attr_values from stations s JOIN
                station_attributes a on (s.iemid = a.iemid) WHERE
                s.network in %s GROUP by a.iemid
            )
            SELECT s.*, ST_x(geom) as lon, ST_y(geom) as lat,
            a.attrs, a.attr_values
            from stations s LEFT JOIN myattrs a
            on (s.iemid = a.iemid)
            WHERE network in %s ORDER by name ASC
            """, (tuple(network), tuple(network)))
        for row in cursor:
            self.sts[row['id']] = dict(row)
            self.sts[row['id']]['attributes'] = dict(
                zip(row['attrs'] or [], row['attr_values'] or []))
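A minimal usage sketch, assuming the class is importable as
pyiem.network.NetworkTable and that 'DMX' is a valid WFO identifier:

    from pyiem.network import NetworkTable

    nt = NetworkTable("WFO")
    # nt.sts maps station ID -> dict of metadata from the stations table,
    # including the lon/lat columns computed in the query above
    meta = nt.sts.get("DMX")
    if meta is not None:
        print(meta["name"], meta["lon"], meta["lat"])
        print(meta["attributes"])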
Example #17
def main():
    """Map some CLI data"""
    pgconn = get_dbconn('iem')

    df = read_sql("""
    WITH data as (
        SELECT station, snow_jul1 - snow_jul1_normal as s
        from cli_data where valid = '2019-02-18' and snow_jul1 > 0
        and snow_jul1_normal > 0)

    select station, st_x(geom) as lon, st_y(geom) as lat, c.s as val from
    data c JOIN stations s on (s.id = c.station)
    WHERE s.network = 'NWSCLI'
    """, pgconn, index_col=None)
    df['color'] = '#ff0000'
    df.loc[df['val'] > 0, 'color'] = '#0000ff'

    mp = MapPlot(sector='midwest', axisbg='white',
                 title=("2018-2019 Snowfall Total Departure "
                        "from Average [inches]"),
                 subtitle='18 Feb 2019 Based on NWS CLI Reporting Sites')
    mp.plot_values(
        df['lon'].values, df['lat'].values,
        df['val'].values, fmt='%.1f', textsize=12, color=df['color'].values,
        labelbuffer=1)
    mp.postprocess(filename='test.png')
Example #18
def main():
    """Go Main Go"""
    pgconn = get_dbconn('asos')
    dfin = read_sql("""
    with mob as (
        select date_trunc('hour', valid) as ts, avg(dwpf) from alldata
        where station = 'MOB' and dwpf is not null GROUP by ts),
    cmi as (
        select date_trunc('hour', valid) as ts, avg(dwpf) from alldata
        where station = 'CMI' and dwpf is not null GROUP by ts),
    agg as (
        select m.ts, m.avg as dwpf, c.avg as tmpf
        from mob m JOIN cmi c on (m.ts = c.ts))
    select extract(month from ts) as month, extract(hour from ts) as hour,
    sum(case when dwpf >= tmpf then 1 else 0 end) / count(*)::float * 100.
    as freq from agg GROUP by month, hour ORDER by month, hour
    """, pgconn, index_col=None)

    df = dfin.pivot(index="month", columns="hour", values="freq")

    fig, ax = plt.subplots(figsize=(9, 6))
    ax.set_title(("Hourly Frequency of Mobile (MOB) Dew Point\n"
                  "greater than or equal to Champaign (CMI) Dew Point"))
    sns.heatmap(df, annot=True, fmt=".0f", linewidths=.5, ax=ax, vmin=5, vmax=100)
    print(ax.get_yticks())
    ax.set_xlabel("Hour of Day (CDT or CST)")
    ax.set_xticklabels(["Mid", "1AM", "2", "3", "4", "5", "6", "7", "8", "9", "10",
                   "11", "Noon", "1PM", "2", "3", "4", "5", "6", "7", "8", "9", "10",
                   "11"])
    ax.set_yticklabels(calendar.month_abbr[1:])
    fig.savefig('test.png')
Example #19
def run_db(dbname):
    """ Lets do an actual database """
    dbconn = get_dbconn(dbname)
    cursor = dbconn.cursor()

    check_management(cursor)

    cursor.execute("""
        SELECT version, updated from iem_schema_manager_version
    """)
    row = cursor.fetchone()
    baseversion = row[0]
    print(("Database: %-15s has revision: %3s (%s)"
           ) % (dbname, baseversion, row[1].strftime("%Y-%m-%d %H:%M")))

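    # Apply any pending schema upgrade files named <dbname>/<version>.sql,
    # one version at a time, until the next file does not exist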
    while True:
        baseversion += 1
        fn = '%s/%s.sql' % (dbname, baseversion)
        if not os.path.isfile(fn):
            break
        print('    -> Attempting schema upgrade #%s ...' % (baseversion,))
        with open(fn) as fh:
            cursor.execute(fh.read())

        cursor.execute("""
            UPDATE iem_schema_manager_version
            SET version = %s, updated = now()
            """, (baseversion,))

    if len(sys.argv) == 1:
        cursor.close()
        dbconn.commit()
    else:
        print('    + No changes made since argument provided')
    dbconn.close()
Example #20
def main():
    """Go!"""
    pgconn = get_dbconn('afos')
    cursor = pgconn.cursor()

    payload = getattr(sys.stdin, 'buffer', sys.stdin).read()
    prod = product.TextProduct(payload.decode('ascii', errors='ignore'))
    prod.valid = prod.valid.replace(second=0, minute=0, microsecond=0)
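    # Locate the first occurrence of the 3-character AFOS product ID, then
    # split the remaining collective into per-site sections on blank lines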
    offset = prod.unixtext.find(prod.afos[:3]) + 7
    sections = re.split("\n\n", prod.unixtext[offset:])

    table = "products_%s_0106" % (prod.valid.year,)
    if prod.valid.month > 6:
        table = "products_%s_0712" % (prod.valid.year,)

    for sect in sections:
        if sect[1:4].strip() == "":
            continue
        # print("%s%s %s %s %s" % (prod.afos[:3], sect[1:4], prod.source,
        #                          prod.valid, prod.wmo))
        cursor.execute("""
            INSERT into """+table+"""
            (pil, data, source, entered, wmo) values (%s, %s, %s, %s, %s)
        """, (prod.afos[:3] + sect[1:4], prod.text[:offset] + sect,
              prod.source, prod.valid, prod.wmo))

    cursor.close()
    pgconn.commit()
    pgconn.close()
Example #21
def gendata():
    """Generate the data we need."""
    pgconn = get_dbconn('idep')
    dates = pd.date_range('2019-01-01', '2019-12-31').strftime("%m%d")
    for scenario in [0, 36, 37, 38]:
        df = read_sql("""
        WITH data as (
            SELECT r.huc_12, to_char(valid, 'mmdd') as sday,
            sum(avg_delivery) / 10. as d from results_by_huc12 r, huc12 h
            WHERE r.huc_12 = h.huc_12 and h.scenario = 0 and h.states ~* 'IA'
            and r.scenario = %s and valid between '2008-01-01' and '2018-01-01'
            GROUP by r.huc_12, sday),
        huc_count as (
            select count(*) from huc12 WHERE scenario = 0 and states ~* 'IA'
        ),
        agg as (
            SELECT sday, sum(d) * 4.463 as data
            FROM data d, huc_count h GROUP by sday ORDER by sday
        )
        select sday, data / h.count as avg from agg a, huc_count h
        WHERE sday != '0229' ORDER by sday
        """, pgconn, params=(scenario, ), index_col='sday')
        df = df.reindex(dates).fillna(0)
        df.index.name = 'sday'
        df.to_csv('/tmp/s%s.csv' % (scenario, ), index=True)
Example #22
def main():
    """Go Main Go"""
    pgconn = get_dbconn('postgis')
    cursor = pgconn.cursor()
    cursor2 = pgconn.cursor()

    table = "sbw_%s" % (sys.argv[1],)

    cursor.execute("""SET TIME ZONE 'UTC'""")
    cursor.execute("""
    SELECT report, expire from """+table+""" where
    status = 'CAN' and polygon_end != expire
    and phenomena in ('TO', 'SV')
    """)

    for row in cursor:
        prod = parser(row[0])
        if not prod.is_single_action():
            continue
        vtec = prod.segments[0].vtec[0]
        cursor2.execute("""UPDATE """ + table + """ SET expire = %s
                    where %s <= expire and wfo = %s and phenomena = %s
                    and significance = %s and eventid = %s
        """, (prod.valid, prod.valid, vtec.office, vtec.phenomena,
              vtec.significance, vtec.etn))
        print("%s -> %s rows:%s" % (row[1], prod.valid, cursor2.rowcount))

    print('%s Processed %s rows' % (sys.argv[1], cursor.rowcount))

    cursor2.close()
    pgconn.commit()
    pgconn.close()
Example #23
def main(argv):
    """Go Main Go."""
    year = argv[1]
    pgconn = get_dbconn('asos')
    cursor = pgconn.cursor()
    cursor2 = pgconn.cursor()
    cursor.execute("""
        SELECT station, valid, sknt, gust, metar from t"""+year+"""
        WHERE round(gust::numeric, 2) != gust::int
    """)
    hits = 0
    for row in cursor:
        m = WIND_RE.findall(row[4])
        if not m:
            continue
        sknt, gust = m[0]
        dirty = False
        if int(sknt) != row[2]:
            # print("sknt old: %s new: %s" % (row[2], int(sknt)))
            dirty = True
        if int(gust) != row[3]:
            # print("gust old: %s new: %s" % (row[3], int(gust)))
            dirty = True
        if not dirty:
            continue
        cursor2.execute("""
            UPDATE t"""+year+""" SET sknt = %s, gust = %s WHERE
            station = %s and valid = %s
        """, (sknt, gust, row[0], row[1]))
        hits += 1
    print("%s %s/%s rows updated" % (year, hits, cursor.rowcount))
    cursor2.close()
    pgconn.commit()
Example #24
def excel_summary(hucs, name):
    """Generate a Excel file with yearly summaries."""
    pgconn = get_dbconn('idep')
    df = read_sql("""
        SELECT huc_12, extract(year from valid) as year,
        sum(avg_loss) * 4.463 as loss_ton_per_acre,
        sum(avg_delivery) * 4.463 as delivery_ton_per_acre,
        sum(qc_precip) / 25.4 as precip_inch,
        sum(avg_runoff) / 25.4 as runoff_inch
        from results_by_huc12 WHERE
        scenario = 0 and huc_12 in %s and valid >= '2007-01-01'
        and valid < '2018-01-01' GROUP by huc_12, year
    """, pgconn, params=(tuple(hucs), ))
    writer = pd.ExcelWriter(
        '%s.xlsx' % (name, ), options={'remove_timezone': True})
    df.to_excel(writer, 'Yearly Totals', index=False)
    gdf = df.groupby('huc_12').mean()
    gdf[['loss_ton_per_acre', 'delivery_ton_per_acre', 'precip_inch',
         'runoff_inch']].to_excel(writer, 'Yearly Averages')
    format1 = writer.book.add_format({'num_format': '0.00'})
    worksheet = writer.sheets['Yearly Totals']
    worksheet.set_column('A:A', 18)
    worksheet.set_column('C:F', 20, format1)
    worksheet = writer.sheets['Yearly Averages']
    worksheet.set_column('A:A', 18)
    worksheet.set_column('B:E', 20, format1)

    writer.save()
Example #25
def iemob():
    """Database."""
    res = blah()
    ts = utc(2015, 9, 1, 1, 0)
    sid = ''.join(random.choice(
                string.ascii_uppercase + string.digits) for _ in range(7))
    res.iemid = 0 - random.randint(0, 1000)
    res.ob = observation.Observation(sid, 'FAKE', ts)
    res.conn = get_dbconn('iem')
    res.cursor = res.conn.cursor(
        cursor_factory=psycopg2.extras.DictCursor)
    # Create fake station, so we can create fake entry in summary
    # and current tables
    res.cursor.execute("""
        INSERT into stations(id, network, iemid, tzname)
        VALUES (%s, 'FAKE', %s, 'UTC')
    """, (sid, res.iemid))
    res.cursor.execute("""
        INSERT into current(iemid, valid) VALUES
        (%s, '2015-09-01 00:00+00')
    """, (res.iemid, ))
    res.cursor.execute("""
        INSERT into summary_2015(iemid, day) VALUES
        (%s, '2015-09-01')
    """, (res.iemid, ))
    return res
Example #26
def main():
    """Go"""
    pgconn = get_dbconn('postgis')
    df = read_sql("""
    with data as (
        select distinct date(issue), wfo, eventid from warnings
        where phenomena = 'TO' and significance = 'W' and issue > '1996-01-01')
    select wfo, date, count(*) from data GROUP by wfo, date
    ORDER by wfo, date
    """, pgconn, index_col=None)
    rows = []
    for wfo, df2 in df.groupby(by='wfo'):
        maxdate = df2['date'].max()
        mindate = df2['date'].min()
        data = [0] * ((maxdate - mindate).days + 4)
        for _, row in df2.iterrows():
            data[(row['date'] - mindate).days] = row['count']
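        # Slide a three-day window over the series, keeping windows with
        # more than 50 warnings (or any window for the COMPAREWFOS)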
        for i in range(0, len(data) - 2):
            if sum(data[i:i+3]) > 50 or wfo in COMPAREWFOS:
                date = mindate + datetime.timedelta(days=i)
                rows.append(dict(wfo=wfo, date=date, count=sum(data[i:i+3]),
                                 one=data[i], two=data[i+1], three=data[i+2]))

    df = pd.DataFrame(rows)
    df.sort_values('count', ascending=False, inplace=True)
    for _, row in df.head(15).iterrows():
        printr(row)
    for wfo in COMPAREWFOS:
        df2 = df[df['wfo'] == wfo]
        printr(df2.iloc[0])
Example #27
def loadqc(cursor=None, date=None):
    """ Load the current IEM Tracker QC'd variables

    Args:
      cursor (cursor,optional): Optionally provided database cursor
      date (date,optional): Date for which the tickets are valid; defaults
        to today
    """
    if date is None:
        date = datetime.date.today()
    qdict = {}
    if cursor is None:
        portfolio = get_dbconn('portfolio', user='******')
        cursor = portfolio.cursor()

    cursor.execute("""
        select s_mid, sensor, status from tt_base
        WHERE sensor is not null
        and date(entered) <= %s and
        (status != 'CLOSED' or closed > %s)
        and s_mid is not null
    """, (date, date))
    for row in cursor:
        sid = row[0]
        if row[0] not in qdict:
            qdict[sid] = {}
        for vname in row[1].split(","):
            qdict[sid][vname.strip()] = True
    return qdict
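A minimal usage sketch, assuming a reachable 'portfolio' database (or an
injected cursor); the date shown is hypothetical:

    import datetime

    qdict = loadqc(date=datetime.date(2018, 5, 22))
    for sid, flags in qdict.items():
        # flags is a dict of variable name -> True for open tickets
        print(sid, sorted(flags))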
Example #28
def main(argv):
    """Go Main Go"""
    basedir = "/mesonet/data/prism"
    outdir = "swatfiles_prism_arealaverage"
    if os.path.isdir(outdir):
        print("ABORT: as %s exists" % (outdir, ))
        return
    os.mkdir(outdir)
    for dirname in ['precipitation', 'temperature']:
        os.mkdir("%s/%s" % (outdir, dirname))
    pgconn = get_dbconn('idep')
    huc8df = gpd.GeoDataFrame.from_postgis("""
    SELECT huc8, ST_Transform(simple_geom, %s) as geo from wbd_huc8
    WHERE swat_use ORDER by huc8
    """, pgconn, params=(PROJSTR,), index_col='huc8', geom_col='geo')
    hucs = huc8df.index.values
    years = range(1981, 2018)
    nc = netCDF4.Dataset("%s/%s_daily.nc" % (basedir, years[0]))

    # compute the affine
    ncaffine = Affine(nc.variables['lon'][1] - nc.variables['lon'][0],
                      0.,
                      nc.variables['lon'][0],
                      0.,
                      nc.variables['lat'][0] - nc.variables['lat'][1],
                      nc.variables['lat'][-1])
    czs = CachingZonalStats(ncaffine)
    nc.close()

    fps = []
    for year in years:
        nc = netCDF4.Dataset("%s/%s_daily.nc" % (basedir, year))
        basedate, timesz = get_basedate(nc)
        for i in tqdm(range(timesz), desc=str(year)):
            # date = basedate + datetime.timedelta(days=i)

            # keep array logic in top-down order
            tasmax = np.flipud(nc.variables['tmax'][i, :, :])
            tasmin = np.flipud(nc.variables['tmin'][i, :, :])
            pr = np.flipud(nc.variables['ppt'][i, :, :])
            mytasmax = czs.gen_stats(tasmax, huc8df['geo'])
            mytasmin = czs.gen_stats(tasmin, huc8df['geo'])
            mypr = czs.gen_stats(pr, huc8df['geo'])
            for j, huc12 in enumerate(hucs):
                if i == 0 and year == years[0]:
                    fps.append([open(('%s/precipitation/P%s.txt'
                                      ) % (outdir, huc12), 'w'),
                                open(('%s/temperature/T%s.txt'
                                      ) % (outdir, huc12), 'w')])
                    fps[j][0].write("%s\n" % (basedate.strftime("%Y%m%d"), ))
                    fps[j][1].write("%s\n" % (basedate.strftime("%Y%m%d"), ))

                fps[j][0].write(("%.1f\n"
                                 ) % (mypr[j], ))
                fps[j][1].write(("%.2f,%.2f\n"
                                 ) % (mytasmax[j], mytasmin[j]))

    for fp in fps:
        fp[0].close()
        fp[1].close()
Example #29
def main():
    """Go Main Go"""
    pgconn = get_dbconn('scan')
    for station in ['S2004', 'S2196', 'S2002', 'S2072', 'S2068',
                    'S2031', 'S2001', 'S2047']:
        df = read_sql("""
        select extract(year from valid + '2 months'::interval) as wy,
        tmpf, dwpf from alldata where station = %s and tmpf is not null
        and dwpf is not null
        """, pgconn, params=(station, ), index_col=None)
        df['mixingratio'] = meteorology.mixing_ratio(
            temperature(df['dwpf'].values, 'F')).value('KG/KG')
        df['vapor_pressure'] = mcalc.vapor_pressure(
            1000. * units.mbar,
            df['mixingratio'].values * units('kg/kg')).to(units('kPa'))
        df['saturation_mixingratio'] = (
            meteorology.mixing_ratio(
                temperature(df['tmpf'].values, 'F')).value('KG/KG'))
        df['saturation_vapor_pressure'] = mcalc.vapor_pressure(
            1000. * units.mbar,
            df['saturation_mixingratio'].values * units('kg/kg')).to(units('kPa'))
        df['vpd'] = df['saturation_vapor_pressure'] - df['vapor_pressure']
        means = df.groupby('wy').mean()
        counts = df.groupby('wy').count()
        for yr, row in means.iterrows():
            print(("%s,%s,%.0f,%.3f"
                   ) % (yr, station, counts.at[yr, 'vpd'], row['vpd']))
Example #30
def main():
    """Go"""
    pgconn = get_dbconn('iem')
    cursor = pgconn.cursor()

    cursor.execute("""
    with data as (
        select distinct t.iemid,
        date_trunc('hour', valid + '10 minutes'::interval) as v from
        current_log c JOIN stations t on (c.iemid = t.iemid)
        where raw ~* ' FU ' and t.country = 'US')
    SELECT v, count(*) from data GROUP by v ORDER by v ASC
    """)
    xs = []
    ys = []
    for row in cursor:
        xs.append(row[0])
        ys.append(row[1])

    (fig, ax) = plt.subplots(1, 1)
    ax.bar(xs, ys, width=(1./24.))
    ax.grid(True)
    ax.set_ylabel("Number of ASOS/METAR Sites")
    ax.set_xlabel("3-5 July 2017 Valid Time (Central Daylight Time)")
    ax.set_title(("Number of ASOS/METAR Sites Reporting Smoke (FU)\n"
                  "based on METAR reports for the United States processed by IEM"))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%-I %p\n%-d %b',
                                                      tz=pytz.timezone("America/Chicago")))
    ax.set_position([0.1, 0.15, 0.8, 0.75])
    fig.savefig('test.png')
Example #31
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('asos', user='******')

    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['zstation']
    month = int(ctx['month'])
    thres = ctx['t']
    mydir = ctx['dir']

    tzname = ctx['_nt'].sts[station]['tzname']

    df = read_sql("""
    WITH data as (
        SELECT valid at time zone %s  + '10 minutes'::interval as v, tmpf
        from alldata where station = %s and tmpf > -90 and tmpf < 150
        and extract(month from valid) = %s and report_type = 2)

    SELECT extract(hour from v) as hour,
    min(v) as min_valid, max(v) as max_valid,
    sum(case when tmpf::int < %s THEN 1 ELSE 0 END) as below,
    sum(case when tmpf::int >= %s THEN 1 ELSE 0 END) as above,
    count(*) from data
    GROUP by hour ORDER by hour ASC
    """,
                  pgconn,
                  params=(tzname, station, month, thres, thres),
                  index_col='hour')
    if df.empty:
        raise NoDataFound("No data found.")

    df['below_freq'] = df['below'].values.astype('f') / df['count'] * 100.
    df['above_freq'] = df['above'].values.astype('f') / df['count'] * 100.

    freq = df[mydir + "_freq"].values
    hours = df.index.values

    (fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
    bars = ax.bar(hours, freq, fc='blue', align='center')
    for i, mybar in enumerate(bars):
        ax.text(i,
                mybar.get_height() + 3,
                "%.0f" % (mybar.get_height(), ),
                ha='center',
                fontsize=10)
    ax.set_xticks(range(0, 25, 3))
    ax.set_xticklabels(
        ['Mid', '3 AM', '6 AM', '9 AM', 'Noon', '3 PM', '6 PM', '9 PM',
         'Mid'])
    ax.grid(True)
    ax.set_ylim(0, 100)
    ax.set_yticks([0, 25, 50, 75, 100])
    ax.set_ylabel("Frequency [%]")
    ax.set_xlabel("Hour Timezone: %s" % (tzname, ))
    ax.set_xlim(-0.5, 23.5)
    ax.set_title(("(%s - %s) %s [%s]\n"
                  r"Frequency of %s Hour, %s: %s$^\circ$F") %
                 (df['min_valid'].min().year, df['max_valid'].max().year,
                  ctx['_nt'].sts[station]['name'], station,
                  calendar.month_name[month], PDICT[mydir], thres))

    return fig, df
Example #32
def plotter(fdict):
    """ Go """
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    pgconn = get_dbconn('asos')
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['zstation']
    network = ctx['network']
    hours = ctx['hours']
    interval = ctx['interval']
    if interval > 10 or interval < 0.1:
        return "Invalid interval provided, positive number less than 10"

    nt = NetworkTable(network)

    cursor.execute(
        """
    WITH one as (
        select valid, tmpf::int as t from alldata where
        station = %s and tmpf is not null and tmpf > -50
        ),
        two as (SELECT valid + '%s hours'::interval as v, t from one
        )

    SELECT extract(week from one.valid), two.t - one.t
    from one JOIN two on (one.valid = two.v)
    """, (station, hours))
    weeks = []
    deltas = []
    for row in cursor:
        weeks.append(row[0])
        deltas.append(float(row[1]))

    sts = datetime.datetime(2012, 1, 1)
    xticks = []
    for i in range(1, 13):
        ts = sts.replace(month=i)
        xticks.append(int(ts.strftime("%j")))

    # We want bins centered on zero
    bins = compute_bins(interval)

    hist, xedges, yedges = np.histogram2d(weeks, deltas, [range(0, 54), bins])
    years = float(datetime.datetime.now().year -
                  nt.sts[station]['archive_begin'].year)
    hist = np.ma.array(hist / years / 7.0)
    hist.mask = np.where(hist < (1. / years), True, False)

    (fig, ax) = plt.subplots(1, 1)
    res = ax.pcolormesh((xedges - 1) * 7,
                        yedges,
                        hist.transpose(),
                        # lowercase 'spectral' was removed from matplotlib
                        cmap=plt.get_cmap('Spectral'))
    fig.colorbar(res, label="Hours per Day")
    ax.grid(True)
    ax.set_title(("%s [%s]\nHistogram (bin=%s$^\circ$F) "
                  "of %s Hour Temperature Change") %
                 (nt.sts[station]['name'], station, interval, hours))
    ax.set_ylabel("Temperature Change $^{\circ}\mathrm{F}$")

    ax.set_xticks(xticks)
    ax.set_xticklabels(calendar.month_abbr[1:])
    ax.set_xlim(0, 366)

    rng = max([max(deltas), 0 - min(deltas)])
    ax.set_ylim(0 - rng - 4, rng + 4)

    return fig
Example #33
def plotter(fdict):
    """ Go """
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["station"]
    days = ctx["days"]
    days2 = ctx["days2"]
    _days2 = days2 if days2 > 0 else 1
    days3 = ctx["days3"]
    _days3 = days3 if days3 > 0 else 1
    sts = ctx["sdate"]
    ets = ctx["edate"]
    yrrange = ets.year - sts.year
    year2 = ctx.get("year2")  # could be null!
    year3 = ctx.get("year3")  # could be null!
    pgconn = get_dbconn("coop")

    table = "alldata_%s" % (station[:2], )
    df = read_sql(
        """
    WITH agg as (
        SELECT o.day, o.sday,
        avg(high) OVER (ORDER by day ASC ROWS %s PRECEDING) as avgt,
        sum(precip) OVER (ORDER by day ASC ROWS %s PRECEDING) as sump,
        count(*) OVER (ORDER by day ASC ROWS %s PRECEDING) as cnt,
        avg(high) OVER (ORDER by day ASC ROWS %s PRECEDING) as avgt2,
        sum(precip) OVER (ORDER by day ASC ROWS %s PRECEDING) as sump2,
        count(*) OVER (ORDER by day ASC ROWS %s PRECEDING) as cnt2,
        avg(high) OVER (ORDER by day ASC ROWS %s PRECEDING) as avgt3,
        sum(precip) OVER (ORDER by day ASC ROWS %s PRECEDING) as sump3,
        count(*) OVER (ORDER by day ASC ROWS %s PRECEDING) as cnt3
        from """ + table + """ o WHERE station = %s),
    agg2 as (
        SELECT sday,
        avg(avgt) as avg_avgt, stddev(avgt) as std_avgt,
        avg(sump) as avg_sump, stddev(sump) as std_sump,
        avg(avgt2) as avg_avgt2, stddev(avgt2) as std_avgt2,
        avg(sump2) as avg_sump2, stddev(sump2) as std_sump2,
        avg(avgt3) as avg_avgt3, stddev(avgt3) as std_avgt3,
        avg(sump3) as avg_sump3, stddev(sump3) as std_sump3
        from agg WHERE cnt = %s GROUP by sday)

    SELECT day,
    (a.avgt - b.avg_avgt) / b.std_avgt as t,
    (a.sump - b.avg_sump) / b.std_sump as p,
    (a.avgt2 - b.avg_avgt2) / b.std_avgt2 as t2,
    (a.sump2 - b.avg_sump2) / b.std_sump2 as p2,
    (a.avgt3 - b.avg_avgt3) / b.std_avgt3 as t3,
    (a.sump3 - b.avg_sump3) / b.std_sump3 as p3
    from agg a JOIN agg2 b on (a.sday = b.sday)
    ORDER by day ASC
    """,
        pgconn,
        params=(
            days - 1,
            days - 1,
            days - 1,
            _days2 - 1,
            _days2 - 1,
            _days2 - 1,
            _days3 - 1,
            _days3 - 1,
            _days3 - 1,
            station,
            days,
        ),
        index_col="day",
    )
    if df.empty:
        raise NoDataFound("No Data Found.")
    df["arridity"] = df["t"] - df["p"]
    df["arridity2"] = df["t2"] - df["p2"]
    df["arridity3"] = df["t3"] - df["p3"]
    (fig, ax) = plt.subplots(1, 1)

    if year2 is None:
        df2 = df.loc[sts:ets]
        ax.plot(
            df2.index.values,
            df2["arridity"],
            color="r",
            lw=2,
            label="%s days" % (days, ),
        )
        maxval = df2["arridity"].abs().max() + 0.25
        if days2 > 0:
            ax.plot(
                df2.index.values,
                df2["arridity2"],
                color="b",
                lw=2,
                label="%s days" % (days2, ),
            )
            maxval = max([maxval, df2["arridity2"].abs().max() + 0.25])
        if days3 > 0:
            ax.plot(
                df2.index.values,
                df2["arridity3"],
                color="g",
                lw=2,
                label="%s days" % (days3, ),
            )
            maxval = max([maxval, df2["arridity3"].abs().max() + 0.25])
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%-d %b\n%Y"))
        title = ""
    else:
        df2 = df.loc[sts:ets]
        ax.plot(
            np.arange(len(df2.index)),
            df2["arridity"],
            color="r",
            lw=2,
            label="%s" % (ets.year, ),
        )
        maxval = df2["arridity"].abs().max() + 0.25
        if year2 is not None:
            sts2 = sts.replace(year=(year2 - yrrange))
            ets2 = ets.replace(year=year2)
            xticks = []
            xticklabels = []
            now = sts2
            i = 0
            while now < ets2:
                if now.day == 1:
                    xticks.append(i)
                    xticklabels.append(now.strftime("%b"))
                i += 1
                now += datetime.timedelta(days=1)
            ax.set_xticks(xticks)
            ax.set_xticklabels(xticklabels)
            df2 = df.loc[sts2:ets2]
            ax.plot(
                np.arange(len(df2.index)),
                df2["arridity"],
                color="b",
                lw=2,
                label="%s" % (year2, ),
            )
            maxval = max([maxval, df2["arridity"].abs().max() + 0.25])
        if year3 is not None:
            sts2 = sts.replace(year=(year3 - yrrange))
            ets2 = ets.replace(year=year3)
            df2 = df.loc[sts2:ets2]
            ax.plot(
                np.arange(len(df2.index)),
                df2["arridity"],
                color="g",
                lw=2,
                label="%s" % (year3, ),
            )
            maxval = max([maxval, df2["arridity"].abs().max() + 0.25])

        # Compute year of best fit
        arridity = df.loc[sts:ets, "arridity"].values
        mae = 100
        useyear = None
        for _year in range(1951, datetime.date.today().year + 1):
            if _year == ets.year:
                continue
            sts2 = sts.replace(year=(_year - yrrange))
            ets2 = ets.replace(year=_year)
            arridity2 = df.loc[sts2:ets2, "arridity"].values
            sz = min([len(arridity2), len(arridity)])
            error = (np.mean((arridity2[:sz] - arridity[:sz])**2))**0.5
            if error < mae:
                mae = error
                useyear = _year
        if useyear:
            sts2 = sts.replace(year=(useyear - yrrange))
            ets2 = ets.replace(year=useyear)
            df2 = df.loc[sts2:ets2]
            ax.plot(
                np.arange(len(df2.index)),
                df2["arridity"],
                color="k",
                lw=2,
                label="%s (%s best match)" % (useyear, ets.year),
            )
            maxval = max([maxval, df2["arridity"].abs().max() + 0.25])
        title = "%s Day" % (days, )
        ax.set_xlabel("%s to %s" %
                      (sts.strftime("%-d %b"), ets.strftime("%-d %b")))
    ax.grid(True)
    ax.set_title(("%s [%s] %s Arridity Index\n"
                  "Std. High Temp Departure minus Std. Precip Departure") %
                 (ctx["_nt"].sts[station]["name"], station, title))
    ax.set_ylim(0 - maxval, maxval)
    ax.set_ylabel("Arridity Index")
    ax.text(
        1.01,
        0.75,
        "<-- More Water Stress",
        ha="left",
        va="center",
        transform=ax.transAxes,
        rotation=-90,
    )
    ax.text(
        1.01,
        0.25,
        "Less Water Stress -->",
        ha="left",
        va="center",
        transform=ax.transAxes,
        rotation=-90,
    )
    ax.legend(ncol=4, loc="best", fontsize=10)
    return fig, df
Example #34
def plotter(fdict):
    """ Go """
    from seaborn import heatmap

    ctx = get_autoplot_context(fdict, get_description())
    pgconn = get_dbconn("coop")
    table = "alldata_%s" % (ctx["station"][:2], )
    df = read_sql(
        """
        select day, sday, precip, high,
        extract(doy from day)::int as doy, year
        from """ + table + """  WHERE
        station = %s ORDER by day ASC
    """,
        pgconn,
        params=(ctx["station"], ),
        index_col="day",
        parse_dates="day",
    )
    if df.empty:
        raise NoDataFound("Did not find any data for station!")
    if ctx["var"] == "trail_precip_percent":
        climo = df[["precip", "sday"]].groupby("sday").mean()
        df["precip_avg"] = df.merge(climo,
                                    left_on="sday",
                                    right_index=True,
                                    suffixes=("", "_avg"))["precip_avg"]
        df["trail_precip_percent"] = (
            df["precip"].rolling(ctx["days"]).sum() /
            df["precip_avg"].rolling(ctx["days"]).sum() * 100.0)
        levels = [0, 25, 50, 75, 100, 150, 200, 250, 300]
        label = "Percent"
    elif ctx["var"] == "daily_high_depart":
        climo = df[["high", "sday"]].groupby("sday").mean()
        df["high_avg"] = df.merge(climo,
                                  left_on="sday",
                                  right_index=True,
                                  suffixes=("", "_avg"))["high_avg"]
        df["daily_high_depart"] = df["high"] - df["high_avg"]
        levels = list(range(-20, 21, 4))
        label = "Temperature [F] Departure"

    baseyear = max([df["year"].min(), ctx["syear"]])
    endyear = min([df["year"].max(), ctx["eyear"]])
    years = endyear - baseyear + 1
    cmap = get_cmap(ctx["cmap"])
    norm = mpcolors.BoundaryNorm(levels, cmap.N)
    data = np.full((years, 366), np.nan)
    df2 = df[(df["year"] >= baseyear) & (df["year"] <= endyear)]
    for day, row in df2.iterrows():
        data[day.year - baseyear, row["doy"] - 1] = row[ctx["var"]]

    fig, ax = plt.subplots(1, 1)
    heatmap(
        data,
        cmap=cmap,
        norm=norm,
        ax=ax,
        cbar_kws={
            "spacing": "proportional",
            "label": label
        },
    )
    ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335))
    ax.set_xticklabels(calendar.month_abbr[1:], rotation=0)
    yticks = []
    yticklabels = []
    delta = 5 if (endyear - baseyear) < 30 else 10
    for i, year in enumerate(range(baseyear, endyear + 1)):
        if year % delta == 0:
            yticks.append(i + 0.5)
            yticklabels.append(year)
    ax.set_yticks(yticks[::-1])
    ax.set_yticklabels(yticklabels[::-1], rotation=0)
    ax.xaxis.grid(True, color="k")
    ax.set_title("[%s] %s (%s-%s)\n%s" % (
        ctx["station"],
        ctx["_nt"].sts[ctx["station"]]["name"],
        ctx["syear"],
        ctx["eyear"],
        PDICT[ctx["var"]].replace("XX", str(ctx["days"])),
    ))

    return fig, df
Example #35
def fix_nulls():
    """Correct any nulls."""
    pgconn = get_dbconn("isuag")
    cursor = pgconn.cursor()
    cursor2 = pgconn.cursor()

    nt = NetworkTable("ISUSM")
    cursor.execute("""
        SELECT station, valid from sm_daily where (slrmj_tot_qc is null or
        slrmj_tot_qc = 0) and valid > '2019-04-14' ORDER by valid ASC
    """)
    for row in cursor:
        station = row[0]
        v1 = datetime.datetime(row[1].year, row[1].month, row[1].day)
        v2 = v1.replace(hour=23, minute=59)
        cursor2.execute(
            """
            SELECT sum(slrmj_tot_qc), count(*) from sm_hourly WHERE
            station = %s and valid >= %s and valid < %s
        """,
            (station, v1, v2),
        )
        row2 = cursor2.fetchone()
        if row2[0] is None or row2[0] < 0.01:
            LOG.info(
                "%s %s summed %s hourly for solar, using IEMRE",
                station,
                v1.strftime("%d %b %Y"),
                row2[0],
            )
            if station not in nt.sts:
                LOG.info("unknown station %s metadata, skipping", station)
                continue
            # Go fetch me the IEMRE value!
            uri = ("http://iem.local/iemre/daily/%s/%.2f/%.2f/json") % (
                v1.strftime("%Y-%m-%d"),
                nt.sts[station]["lat"],
                nt.sts[station]["lon"],
            )
            res = requests.get(uri)
            if res.status_code != 200:
                LOG.info("Fix solar got %s from %s", res.status_code, uri)
                continue
            j = json.loads(res.content)
            if not j["data"]:
                LOG.info(
                    "fix solar: No data for %s %s",
                    station,
                    v1.strftime("%d %b %Y"),
                )
                continue
            row2 = [j["data"][0]["srad_mj"], -1]
        if row2[0] is None or row2[0] < 0.01:
            LOG.info("Triple! Failure %s %s", station, v1.strftime("%d %b %Y"))
            continue
        LOG.info(
            "%s %s -> %.2f (%s obs)",
            station,
            v1.strftime("%d %b %Y"),
            row2[0],
            row2[1],
        )
        cursor2.execute(
            """
            UPDATE sm_daily SET slrmj_tot_qc = %s
            WHERE station = %s and valid = %s
        """,
            (row2[0], station, row[1]),
        )

    cursor2.close()
    pgconn.commit()
Example #36
def do_ugc(ctx):
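    """Build the per-UGC dataframe and bins for this map."""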
    pgconn = get_dbconn('postgis')
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    varname = ctx['v']
    station = ctx['station'][:4]
    state = ctx['state']
    phenomena = ctx['phenomena']
    significance = ctx['significance']
    t = ctx['t']
    sdate = ctx['sdate']
    edate = ctx['edate']
    year = ctx['year']
    year2 = ctx['year2']
    if varname == 'lastyear':
        if t == 'cwa':
            cursor.execute("""
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """, (station if len(station) == 3 else station[1:],
                  phenomena, significance))
        else:
            cursor.execute("""
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """, (state, phenomena, significance))
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(valid=row[1],
                             year=row[1].year,
                             ugc=row[0]))
            data[row[0]] = row[1].year
        ctx['title'] = "Year of Last"
        datavar = "year"
    elif varname == 'yearcount':
        table = "warnings_%s" % (year, )
        if t == 'cwa':
            cursor.execute("""
            select ugc, count(*) from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """, (station if len(station) == 3 else station[1:],
                  phenomena, significance))
        else:
            cursor.execute("""
            select ugc, count(*) from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """, (state, phenomena, significance))
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(count=row[1], year=year,
                             ugc=row[0]))
            data[row[0]] = row[1]
        ctx['title'] = "Count for %s" % (year,)
        datavar = "count"
    elif varname == 'total':
        table = "warnings"
        if t == 'cwa':
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s
            GROUP by ugc
            """, (station if len(station) == 3 else station[1:],
                  phenomena, significance,
                  sdate, edate))
        else:
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """, (state, phenomena, significance,
                  sdate, edate))
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(count=row[1], year=year,
                             ugc=row[0], minissue=row[2], maxissue=row[3]))
            data[row[0]] = row[1]
        ctx['title'] = "Total"
        ctx['subtitle'] = (" between %s and %s UTC"
                           ) % (sdate.strftime("%d %b %Y %H%M"),
                                edate.strftime("%d %b %Y %H%M"))
        datavar = "count"
    elif varname == 'yearavg':
        table = "warnings"
        if t == 'cwa':
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s
            GROUP by ugc
            """, (station if len(station) == 3 else station[1:],
                  phenomena, significance,
                  datetime.date(year, 1, 1), datetime.date(year2 + 1, 1, 1)))
        else:
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """, (state, phenomena, significance,
                  datetime.date(year, 1, 1), datetime.date(year2 + 1, 1, 1)))
        rows = []
        data = {}
        minv = datetime.datetime(2050, 1, 1)
        maxv = datetime.datetime(1986, 1, 1)
        for row in cursor:
            if row[2] < minv:
                minv = row[2]
            if row[3] > maxv:
                maxv = row[3]
            rows.append(dict(count=row[1], year=year,
                             ugc=row[0], minissue=row[2], maxissue=row[3]))
            data[row[0]] = row[1]
        ctx['title'] = ("Yearly Avg: %s and %s"
                        ) % (minv.strftime("%d %b %Y"),
                             maxv.strftime("%d %b %Y"))
        datavar = "average"

    if not rows:
        raise ValueError("Sorry, no data found for query!")
    df = pd.DataFrame(rows)
    if varname == 'yearavg':
        years = maxv.year - minv.year + 1
        df['average'] = df['count'] / years
        for key in data:
            data[key] = round(data[key] / float(years), 2)
        maxv = df[datavar].max()
        for delta in [500, 50, 5, 1, 0.5, 0.05]:
            bins = np.arange(0, (maxv + 1.) * 1.05, delta)
            if len(bins) > 8:
                break
        if len(bins) > 8:
            bins = bins[::int(len(bins) / 8.)]
        bins[0] = 0.01
    else:
        bins = list(range(np.min(df[datavar][:]), np.max(df[datavar][:]) + 2))
        if len(bins) < 3:
            bins.append(bins[-1] + 1)
        if len(bins) > 8:
            bins = np.linspace(np.min(df[datavar][:]),
                               np.max(df[datavar][:])+2, 8, dtype='i')
    ctx['bins'] = bins
    ctx['data'] = data
    ctx['df'] = df
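
A minimal sketch of how do_ugc() might be driven; the ctx keys mirror the ones
read at the top of the function, and the station/phenomena values here are
hypothetical:

from datetime import datetime

ctx = {
    'v': 'yearcount',      # which varname branch to exercise
    'station': 'KDMX',     # trimmed to four characters inside do_ugc()
    'state': 'IA',
    'phenomena': 'TO',     # tornado
    'significance': 'W',   # warning
    't': 'cwa',            # summarize by forecast office
    'sdate': datetime(2017, 1, 1),
    'edate': datetime(2018, 1, 1),
    'year': 2017,
    'year2': 2018,
}
do_ugc(ctx)
print(ctx['title'], len(ctx['df'].index))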
Example #37
"""Run to ingest gempak files from mtarchive"""
import subprocess
import datetime

import requests
import pytz
from ingest_from_rucsoundings import RAOB
from pyiem.util import get_dbconn

POSTGIS = get_dbconn('postgis')


def conv(raw):
    """Convert raw text to a float, returning None for missing sentinels."""
    if float(raw) < -9998:
        return None
    return float(raw)


def conv_speed(raw):
    """Convert a speed in knots to meters per second."""
    if raw in ['99999', '-9999.00']:
        return None
    return float(raw) * 0.5144
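
A quick sanity check of the two converters (the inputs are illustrative):

assert conv('-9999.00') is None
assert abs(conv_speed('10.00') - 5.144) < 1e-6  # 10 knots -> ~5.14 mps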


sts = datetime.datetime(1989, 9, 25)
ets = datetime.datetime(1989, 9, 26)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
    print(now)
    now += interval  # advance a day; without this the loop never terminates
Example #38
def run(station, syear, eyear):
    """Do something"""
    pgconn = get_dbconn("coop")
    cursor = pgconn.cursor()

    table = "alldata_%s" % (station[:2],)
    cursor.execute(
        f"""
    WITH data as (
      SELECT sday, year, precip,
      avg(precip) OVER (PARTITION by sday) as avg_precip,
      high, rank() OVER (PARTITION by sday ORDER by high DESC) as max_high,
      avg(high) OVER (PARTITION by sday) as avg_high,
      rank() OVER (PARTITION by sday ORDER by high ASC) as min_high,
      low, rank() OVER (PARTITION by sday ORDER by low DESC) as max_low,
      avg(low) OVER (PARTITION by sday) as avg_low,
      rank() OVER (PARTITION by sday ORDER by low ASC) as min_low,
      rank() OVER (PARTITION by sday ORDER by precip DESC) as max_precip
      from {table} WHERE station = %s and year >= %s and year < %s),

    max_highs as (
      SELECT sday, high, array_agg(year) as years from data
      where max_high = 1 GROUP by sday, high),
    min_highs as (
      SELECT sday, high, array_agg(year) as years from data
      where min_high = 1 GROUP by sday, high),

    max_lows as (
      SELECT sday, low, array_agg(year) as years from data
      where max_low = 1 GROUP by sday, low),
    min_lows as (
      SELECT sday, low, array_agg(year) as years from data
      where min_low = 1 GROUP by sday, low),

    max_precip as (
      SELECT sday, precip, array_agg(year) as years from data
      where max_precip = 1 GROUP by sday, precip),

    avgs as (
      SELECT sday, count(*) as cnt, max(avg_precip) as p,
      max(avg_high) as h, max(avg_low) as l from data GROUP by sday)

    SELECT a.sday, a.cnt, a.h, xh.high, xh.years,
    nh.high, nh.years, a.l, xl.low, xl.years,
    nl.low, nl.years, a.p, mp.precip, mp.years
    from avgs a, max_highs xh, min_highs nh, max_lows xl, min_lows nl,
    max_precip mp
    WHERE xh.sday = a.sday and xh.sday = nh.sday and xh.sday = xl.sday and
    xh.sday = nl.sday and xh.sday = mp.sday ORDER by sday ASC

    """,
        (station, syear, eyear),
    )
    res = {
        "station": station,
        "start_year": syear,
        "end_year": eyear,
        "climatology": [],
    }
    for row in cursor:
        res["climatology"].append(
            dict(
                month=int(row[0][:2]),
                day=int(row[0][2:]),
                years=row[1],
                avg_high=float(row[2]),
                max_high=row[3],
                max_high_years=row[4],
                min_high=row[5],
                min_high_years=row[6],
                avg_low=float(row[7]),
                max_low=row[8],
                max_low_years=row[9],
                min_low=row[10],
                min_low_years=row[11],
                avg_precip=float(row[12]),
                max_precip=row[13],
                max_precip_years=row[14],
            )
        )

    return json.dumps(res)
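
A sketch of invoking run(); the station identifier is hypothetical, and json
must be imported by the surrounding module for the final json.dumps() call:

import json

payload = json.loads(run("IA0200", 1893, 2020))
print(payload["station"], len(payload["climatology"]))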
Example #39
def plotter(fdict):
    """ Go """
    import matplotlib
    matplotlib.use('agg')
    pgconn = get_dbconn('coop')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['station']

    table = "alldata_%s" % (station[:2], )
    nt = NetworkTable("%sCLIMATE" % (station[:2], ))
    res = ("# IEM Climodat https://mesonet.agron.iastate.edu/climodat/\n"
           "# Report Generated: %s\n"
           "# Climate Record: %s -> %s\n"
           "# Site Information: [%s] %s\n"
           "# Contact Information: Daryl Herzmann "
           "[email protected] 515.294.5978\n") % (
               datetime.date.today().strftime("%d %b %Y"),
               nt.sts[station]['archive_begin'].date(), datetime.date.today(),
               station, nt.sts[station]['name'])
    res += ("# THESE ARE THE HEAT STRESS VARIABLES FOR STATION #  %s\n") % (
        station, )

    s = nt.sts[station]['archive_begin']
    if s is None:
        raise ValueError("Unknown station metadata.")
    e = datetime.date.today().year + 1

    df = read_sql("""
        SELECT year, month, sum(case when high > 86 then 1 else 0 end) as days,
        sum(case when high > 86 then high - 86 else 0 end) as sdd
        from """ + table + """ WHERE
        station = %s GROUP by year, month
    """,
                  pgconn,
                  params=(station, ),
                  index_col=None)
    sdd = df.pivot(index='year', columns='month', values='sdd')
    days = df.pivot(index='year', columns='month', values='days')
    df = sdd.join(days, lsuffix='sdd', rsuffix='days')

    res += ("             # OF DAYS MAXT >86                     "
            "ACCUMULATED (MAXT - 86 )\n"
            " YEAR   MAY  JUNE  JULY   AUG  SEPT TOTAL      "
            "MAY  JUNE  JULY   AUG  SEPT TOTAL\n")

    for yr in range(s.year, e):
        res += ("%5s" % (yr, ))
        total = 0
        for mo in range(5, 10):
            val = df.at[yr, "%sdays" % (mo, )]
            if np.isnan(val):
                res += ("%6s" % ("M", ))
            else:
                res += ("%6i" % (val, ))
                total += val
        res += ("%6i   " % (total, ))
        total = 0
        for mo in range(5, 10):
            val = df.at[yr, "%ssdd" % (mo, )]
            if np.isnan(val):
                res += ("%6s" % ("M", ))
            else:
                res += ("%6i" % (val, ))
                total += val
        res += ("%6i   \n" % (total, ))

    res += (" **************************************************************"
            "************************\n")

    res += ("MEANS")
    tot = 0
    for mo in range(5, 10):
        val = df["%sdays" % (mo, )].mean()
        tot += val
        res += ("%6.1f" % (val, ))
    res += ("%6.1f   " % (tot, ))
    tot = 0
    for mo in range(5, 10):
        val = df["%ssdd" % (mo, )].mean()
        tot += val
        res += ("%6.1f" % (val, ))
    res += ("%6.1f\n" % (tot, ))

    return None, df, res
Example #40
def get_context(fdict):
    """ Get the context"""
    pgconn = get_dbconn('coop')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['station']
    month = ctx['month']
    ptype = ctx['type']
    threshold = ctx['threshold']

    table = "alldata_%s" % (station[:2], )
    nt = network.Table("%sCLIMATE" % (station[:2], ))

    lag = "0 days"
    if month == 'fall':
        months = [9, 10, 11]
        label = "Fall (SON)"
    elif month == 'winter':
        months = [12, 1, 2]
        lag = "31 days"
        label = "Winter (DJF)"
    elif month == 'spring':
        months = [3, 4, 5]
        label = "Spring (MAM)"
    elif month == 'summer':
        months = [6, 7, 8]
        label = "Summer (JJA)"
    else:
        months = [
            int(month),
        ]
        label = calendar.month_name[int(month)]

    df = read_sql("""
    WITH climo as (
        SELECT to_char(valid, 'mmdd') as sday,
        high, low from ncdc_climate81 WHERE station = %s),
    day2day as (
        SELECT
        extract(year from day + '""" + lag + """'::interval)::int as myyear,
        month,
        abs(high - lag(high) OVER (ORDER by day ASC)) as dhigh,
        abs(low - lag(low) OVER (ORDER by day ASC)) as dlow,
    abs((high+low)/2. - lag((high+low)/2.) OVER (ORDER by day ASC)) as dtemp
        from """ + table + """ WHERE station = %s),
    agg as (
        SELECT myyear, avg(dhigh) as dhigh, avg(dlow) as dlow,
        avg(dtemp) as dtemp from day2day WHERE month in %s GROUP by myyear),
    agg2 as (
        SELECT
        extract(year from day + '""" + lag + """'::interval)::int as myyear,
        max(o.high) as "max-high",
        min(o.high) as "min-high",
        avg(o.high) as "avg-high",
        stddev(o.high) as "std-high",
        max(o.low) as "max-low",
        min(o.low) as "min-low",
        avg(o.low) as "avg-low",
        stddev(o.low) as "std-low",
        avg((o.high + o.low)/2.) as "avg-temp",
        stddev((o.high + o.low)/2.) as "std-temp",
        max(o.precip) as "max-precip",
        sum(o.precip) as "sum-precip",
        avg(o.high) - avg(o.low) as "range-avghi-avglo",
        sum(case when o.high >= %s then 1 else 0 end) as "days-high-above",
        sum(case when o.high < %s then 1 else 0 end) as "days-high-below",
    sum(case when o.high >= c.high then 1 else 0 end) as "days-high-above-avg",
        sum(case when o.low >= %s then 1 else 0 end) as "days-lows-above",
    sum(case when o.low < c.low then 1 else 0 end) as "days-lows-below-avg",
        sum(case when o.low < %s then 1 else 0 end) as "days-lows-below"
        from """ + table + """ o JOIN climo c on (o.sday = c.sday)
      where station = %s and month in %s GROUP by myyear)

    SELECT b.*, a.dhigh as "delta-high", a.dlow as "delta-low",
    a.dtemp as "delta_temp" from agg a JOIN agg2 b
    on (a.myyear = b.myyear) ORDER by b.myyear ASC
    """,
                  pgconn,
                  params=(nt.sts[station]['ncdc81'], station, tuple(months),
                          threshold, threshold, threshold, threshold, station,
                          tuple(months)),
                  index_col='myyear')
    if df.empty:
        raise ValueError("No data was found for query")

    # Figure out the max min values to add to the row
    df2 = df[df[ptype] == df[ptype].max()]
    if df2.empty:
        raise ValueError("No data was found for query")
    df = df.dropna()
    xlabel = "Year, Max: %.2f %s%s" % (df[ptype].max(), df2.index.values[0],
                                       '+' if len(df2.index) > 1 else '')
    df2 = df[df[ptype] == df[ptype].min()]
    xlabel += ", Min: %.2f %s%s" % (df[ptype].min(), df2.index.values[0],
                                    '+' if len(df2.index) > 1 else '')
    ctx['xlabel'] = xlabel
    data = df[ptype].values
    ctx['data'] = data
    ctx['avgv'] = df[ptype].mean()
    ctx['df'] = df
    # Compute 30 year trailing average
    tavg = [None] * 30
    for i in range(30, len(data)):
        tavg.append(np.average(data[i - 30:i]))
    ctx['tavg'] = tavg
    # End interval is inclusive
    ctx['a1981_2010'] = df.loc[1981:2010, ptype].mean()
    ctx['ptype'] = ptype
    ctx['station'] = station
    ctx['threshold'] = threshold
    ctx['month'] = month
    ctx['title'] = ("[%s] %s %s-%s") % (station, nt.sts[station]['name'],
                                        df.index.min(), df.index.max())
    ctx['subtitle'] = ("%s %s") % (label, PDICT[ptype])
    if ptype.find("days") == 0 and ptype.find('avg') == -1:
        ctx['subtitle'] += " (%s)" % (threshold, )

    units = r"$^\circ$F"
    if ctx['ptype'].find('precip') > 0:
        units = "inches"
    elif ctx['ptype'].find('days') >= 0:  # the 'days-...' names start at index 0
        units = "days"
    ctx['ylabel'] = "%s [%s]" % (PDICT[ctx['ptype']], units)
    return ctx
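
The lag interval is what keeps a winter season in one bucket: adding 31 days
to a December date pushes it into the following year, so December 2017 groups
with January/February 2018. A standalone sketch of the same idea:

import datetime

def winter_year(day, lag_days=31):
    """Mirror the SQL's extract(year from day + lag) season bucketing."""
    return (day + datetime.timedelta(days=lag_days)).year

assert winter_year(datetime.date(2017, 12, 15)) == 2018  # joins DJF 2018
assert winter_year(datetime.date(2018, 1, 15)) == 2018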
Example #41
def run(ctx, start_response):
    """Go run!"""
    pgconn = get_dbconn("postgis", user="******")
    cursor = pgconn.cursor()

    if (ctx["ets"] - ctx["sts"]).days > 120:
        ctx["ets"] = ctx["sts"] + datetime.timedelta(days=120)

    sql = """
        SELECT to_char(valid at time zone 'UTC', 'YYYYMMDDHH24MI') as utctime,
        case when is_urgent then 'T' else 'F' end,
        substr(aircraft_type, 0, 40), substr(report, 0, 255),
        ST_y(geom::geometry) as lat, ST_x(geom::geometry) as lon
        from pireps WHERE
        valid >= '%s' and valid < '%s'  ORDER by valid ASC
        """ % (
        ctx["sts"].strftime("%Y-%m-%d %H:%M+00"),
        ctx["ets"].strftime("%Y-%m-%d %H:%M+00"),
    )

    cursor.execute(sql)
    if cursor.rowcount == 0:
        start_response("200 OK", [("Content-type", "text/plain")])
        return b"ERROR: no results found for your query"

    fn = "pireps_%s_%s" % (
        ctx["sts"].strftime("%Y%m%d%H%M"),
        ctx["ets"].strftime("%Y%m%d%H%M"),
    )

    # sys.stderr.write("End SQL with rowcount %s" % (cursor.rowcount, ))
    if ctx["fmt"] == "csv":
        sio = StringIO()
        headers = [
            ("Content-type", "application/octet-stream"),
            ("Content-Disposition", "attachment; filename=%s.csv" % (fn, )),
        ]
        start_response("200 OK", headers)
        sio.write("VALID,URGENT,AIRCRAFT,REPORT,LAT,LON\n")
        for row in cursor:
            sio.write(",".join([str(s) for s in row]) + "\n")
        return sio.getvalue().encode("ascii", "ignore")

    shpio = BytesIO()
    shxio = BytesIO()
    dbfio = BytesIO()

    with shapefile.Writer(shx=shxio, dbf=dbfio, shp=shpio) as shp:
        shp.field("VALID", "C", 12)
        shp.field("URGENT", "C", 1)
        shp.field("AIRCRAFT", "C", 40)
        shp.field("REPORT", "C", 255)  # Max field size is 255
        shp.field("LAT", "F", 7, 4)
        shp.field("LON", "F", 9, 4)
        for row in cursor:
            shp.point(row[-1], row[-2])
            shp.record(*row)

    zio = BytesIO()
    with zipfile.ZipFile(zio, mode="w",
                         compression=zipfile.ZIP_DEFLATED) as zf:
        zf.writestr(fn + ".prj",
                    open(("/opt/iem/data/gis/meta/4326.prj")).read())
        zf.writestr(fn + ".shp", shpio.getvalue())
        zf.writestr(fn + ".shx", shxio.getvalue())
        zf.writestr(fn + ".dbf", dbfio.getvalue())
    headers = [
        ("Content-type", "application/octet-stream"),
        ("Content-Disposition", "attachment; filename=%s.zip" % (fn, )),
    ]
    start_response("200 OK", headers)
    return zio.getvalue()
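
Outside a real WSGI server, the handler can be exercised with a stub
start_response; the context values below are assumptions:

import datetime

def start_response(status, headers):
    print(status, headers)

ctx = {
    "sts": datetime.datetime(2019, 6, 1),
    "ets": datetime.datetime(2019, 6, 2),
    "fmt": "csv",
}
payload = run(ctx, start_response)
print(payload[:80])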
Example #42
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn("iem")
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["station"]
    days = ctx["days"]
    sts = datetime.date(2012, ctx["month"], ctx["day"])
    ets = sts + datetime.timedelta(days=(days - 1))
    varname = ctx["varname"]
    year = ctx["year"]
    sdays = []
    for i in range(days):
        ts = sts + datetime.timedelta(days=i)
        sdays.append(ts.strftime("%m%d"))

    doff = (days + 1) if ets.year != sts.year else 0
    df = read_sql(
        """
    SELECT extract(year from day - '""" + str(doff) +
        """ days'::interval) as yr,
    avg((max_tmpf+min_tmpf)/2.) as avg_temp, avg(max_tmpf) as avg_high_temp,
    avg(min_tmpf) as avg_low_temp,
    sum(pday) as precip, avg(avg_sknt) * 1.15 as avg_wind_speed,
    min(min_tmpf) as min_low,
    max(min_tmpf) as max_low,
    max(max_tmpf) as max_high,
    avg((max_dwpf + min_dwpf)/2.) as avg_dewp
    from summary s JOIN stations t on (s.iemid = t.iemid)
    WHERE t.network = %s and t.id = %s and to_char(day, 'mmdd') in %s
    GROUP by yr ORDER by yr ASC
    """,
        pgconn,
        params=(ctx["network"], station, tuple(sdays)),
    )
    if df.empty:
        raise NoDataFound("No data was found.")

    (fig, ax) = plt.subplots(2, 1, figsize=(8, 6))

    bars = ax[0].bar(df["yr"],
                     df[varname],
                     facecolor="r",
                     edgecolor="r",
                     align="center")
    thisvalue = "M"
    for mybar, x, y in zip(bars, df["yr"], df[varname]):
        if x == year:
            mybar.set_facecolor("g")
            mybar.set_edgecolor("g")
            thisvalue = y
    ax[0].set_xlabel("Year, %s = %s" % (year, nice(thisvalue)))
    ax[0].axhline(df[varname].mean(),
                  lw=2,
                  label="Avg: %.2f" % (df[varname].mean(), ))
    ylabel = r"Temperature $^\circ$F"
    if varname in ["precip"]:
        ylabel = "Precipitation [inch]"
    elif varname in ["avg_wind_speed"]:
        ylabel = "Wind Speed [MPH]"
    ax[0].set_ylabel(ylabel)
    ax[0].set_title(("[%s] %s\n%s from %s through %s") % (
        station,
        ctx["_nt"].sts[station]["name"],
        PDICT.get(varname),
        sts.strftime("%d %b"),
        ets.strftime("%d %b"),
    ))
    ax[0].grid(True)
    ax[0].legend(ncol=2, fontsize=10)
    ax[0].set_xlim(df["yr"].min() - 1, df["yr"].max() + 1)
    dy = df[varname].max() - df[varname].min()
    ax[0].set_ylim(df[varname].min() - dy * 0.2, df[varname].max() + dy * 0.25)
    box = ax[0].get_position()
    ax[0].set_position([box.x0, box.y0 + 0.02, box.width, box.height * 0.98])

    # Plot 2: CDF
    vals = df[pd.notnull(df[varname])][varname]
    X2 = np.sort(vals)
    ptile = np.percentile(vals, [0, 5, 50, 95, 100])
    N = len(vals)
    F2 = np.array(range(N)) / float(N) * 100.0
    ax[1].plot(X2, 100.0 - F2)
    ax[1].set_xlabel("based on summarized hourly reports, %s" % (ylabel, ))
    ax[1].set_ylabel("Observed Frequency [%]")
    ax[1].grid(True)
    ax[1].set_yticks([0, 5, 10, 25, 50, 75, 90, 95, 100])
    mysort = df.sort_values(by=varname, ascending=True)
    info = ("Min: %.2f %.0f\n95th: %.2f\nMean: %.2f\nSTD: %.2f\n5th: %.2f\n"
            "Max: %.2f %.0f") % (
                df[varname].min(),
                df["yr"][mysort.index[0]],
                ptile[1],
                df[varname].mean(),
                df[varname].std(),
                ptile[3],
                df[varname].max(),
                df["yr"][mysort.index[-1]],
            )
    ax[1].axvline(thisvalue, lw=2, color="g")
    ax[1].text(
        0.8,
        0.95,
        info,
        transform=ax[1].transAxes,
        va="top",
        bbox=dict(facecolor="white", edgecolor="k"),
    )
    return fig, df
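
The second panel is really an exceedance curve: the values are sorted
ascending and plotted against 100 - F2, so the y-axis reads as the share of
years at or above a given value. The same computation in isolation:

import numpy as np

vals = np.array([1.2, 0.4, 2.8, 1.9, 0.9])
x = np.sort(vals)
freq = 100.0 - np.arange(len(x)) / float(len(x)) * 100.0
for v, f in zip(x, freq):
    print("%.0f%% of years saw %.2f or more" % (f, v))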
Example #43
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('coop')
    cursor = pgconn.cursor()
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['station']
    thres1 = ctx['thres1']
    thres2 = ctx['thres2']
    thres3 = ctx['thres3']
    thres4 = ctx['thres4']
    thres = [thres1, thres2, thres3, thres4]

    prs = [make(thres1), make(thres2), make(thres3), make(thres4)]

    table = "alldata_%s" % (station[:2], )
    s = ctx['_nt'].sts[station]['archive_begin']
    e = datetime.date.today()

    if s is None:
        raise NoDataFound("Unknown metadata.")
    res = """\
# IEM Climodat https://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: Daryl Herzmann [email protected] 515.294.5978
# SEASONAL TEMPERATURE CYCLES PER YEAR
# 1 CYCLE IS A TEMPERATURE VARIATION FROM A VALUE BELOW A THRESHOLD
#   TO A VALUE EXCEEDING A THRESHOLD.  THINK OF IT AS FREEZE/THAW CYCLES
#  FIRST DATA COLUMN WOULD BE FOR CYCLES EXCEEDING 26 AND 38 DEGREES F
THRES  %2.0f-%2.0f   %2.0f-%2.0f   %2.0f-%2.0f   %2.0f-%2.0f   %2.0f-%2.0f   %2.0f-%2.0f   %2.0f-%2.0f   %2.0f-%2.0f
YEAR   SPRING  FALL    SPRING  FALL    SPRING  FALL    SPRING  FALL
""" % (
        datetime.date.today().strftime("%d %b %Y"),
        s.date(),
        datetime.date.today(),
        station,
        ctx['_nt'].sts[station]['name'],
        prs[0][0],
        prs[0][1],
        prs[0][0],
        prs[0][1],
        prs[1][0],
        prs[1][1],
        prs[1][0],
        prs[1][1],
        prs[2][0],
        prs[2][1],
        prs[2][0],
        prs[2][1],
        prs[3][0],
        prs[3][1],
        prs[3][0],
        prs[3][1],
    )

    df = pd.DataFrame(
        {
            thres1 + 's': 0,
            thres1 + 'f': 0,
            thres2 + 's': 0,
            thres2 + 'f': 0,
            thres3 + 's': 0,
            thres3 + 'f': 0,
            thres4 + 's': 0,
            thres4 + 'f': 0
        },
        index=pd.Series(range(s.year, e.year + 1), name='year'))

    cycle_pos = [-1, -1, -1, -1]

    cursor.execute(
        """
        SELECT day, high, low from """ + table + """
        WHERE station = %s and high is not null and low is not null
        ORDER by day ASC
    """, (station, ))
    for row in cursor:
        ts = row[0]
        high = int(row[1])
        low = int(row[2])

        for i, (lower, upper) in enumerate(prs):
            ckey = thres[i] + ('s' if ts.month < 7 else 'f')

            # cycles lower
            if cycle_pos[i] == 1 and low < lower:
                # print 'Cycled lower', low, ts
                cycle_pos[i] = -1
                df.loc[ts.year, ckey] += 0.5

            # cycled higher
            if cycle_pos[i] == -1 and high > upper:
                # print 'Cycled higher', high, ts
                cycle_pos[i] = 1
                df.loc[ts.year, ckey] += 0.5

    for yr, row in df.iterrows():
        res += ("%s   %-8i%-8i%-8i%-8i%-8i%-8i%-8i%-8i\n") % (
            yr, row[thres1 + 's'], row[thres1 + 'f'], row[thres2 + 's'],
            row[thres2 + 'f'], row[thres3 + 's'], row[thres3 + 'f'],
            row[thres4 + 's'], row[thres4 + 'f'])

    res += ("AVG    %-8.1f%-8.1f%-8.1f%-8.1f%-8.1f%-8.1f%-8.1f%-8.1f\n") % (
        df[thres1 + 's'].mean(), df[thres1 + 'f'].mean(),
        df[thres2 + 's'].mean(), df[thres2 + 'f'].mean(),
        df[thres3 + 's'].mean(), df[thres3 + 'f'].mean(),
        df[thres4 + 's'].mean(), df[thres4 + 'f'].mean())

    return None, df, res
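
The bookkeeping above adds 0.5 per direction change, so one full cycle
(dropping below the lower bound, then exceeding the upper bound) totals 1.0.
A toy version of the same state machine, independent of the database:

def count_cycles(temps, lower, upper):
    """Count full excursions below lower and then above upper."""
    pos = -1  # start in the 'below' state, as the plotter does
    cycles = 0.0
    for t in temps:
        if pos == 1 and t < lower:
            pos = -1
            cycles += 0.5
        elif pos == -1 and t > upper:
            pos = 1
            cycles += 0.5
    return cycles

print(count_cycles([20, 40, 20, 40, 20], 26, 38))  # -> 2.0 full cycles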
Example #44
from pandas.io.sql import read_sql
import matplotlib.pyplot as plt
from pyiem.util import get_dbconn

pgconn = get_dbconn("idep")
df = read_sql(
    """
    SELECT * from results_by_huc12 where scenario in (0, 7, 9)
    and huc_12 = '102300031504' and valid >= '2008-01-01'
    and valid < '2016-01-01' ORDER by valid ASC
    """,
    pgconn,
    index_col=None,
)

(fig, ax) = plt.subplots(1, 1)
for scenario, label in zip([0, 7, 9],
                           ["Baseline", "UCS 4 Year", "UCS 3 Year"]):
    df2 = df[df["scenario"] == scenario]
    x = []
    y = []
    accum = 0
    for i, row in df2.iterrows():
        x.append(row["valid"])
        accum += row["avg_loss"] * 4.463
        y.append(accum)
    ax.plot(x, y, label="%s" % (label, ))

ax.legend(loc="best")
ax.set_title("Accumulated Soil Detachment for 102300031504")
ax.set_ylabel("Soil Detachment [t/a]")
Example #45
def do_polygon(ctx):
    """polygon workflow"""
    pgconn = get_dbconn("postgis")
    varname = ctx["v"]
    station = ctx["station"][:4]
    state = ctx["state"]
    phenomena = ctx["phenomena"]
    significance = ctx["significance"]
    t = ctx["t"]
    sdate = ctx["sdate"]
    edate = ctx["edate"]
    year = ctx["year"]
    year2 = ctx["year2"]
    # figure out the start and end timestamps
    if varname == "total":
        sts = sdate
        ets = edate
    elif varname == "hour":
        raise NoDataFound("Sorry, not implemented for polygon summaries.")
    elif varname == "yearcount":
        sts = datetime.datetime(year, 1, 1).replace(tzinfo=pytz.utc)
        ets = datetime.datetime(year, 12, 31, 23, 59).replace(tzinfo=pytz.utc)
    else:
        sts = datetime.datetime(year, 1, 1).replace(tzinfo=pytz.utc)
        ets = datetime.datetime(year2, 12, 31, 23, 59).replace(tzinfo=pytz.utc)
    # We need to figure out how to get the warnings either by state or by wfo
    if t == "cwa":
        (west, south, east, north) = wfo_bounds[station]
    else:
        (west, south, east, north) = state_bounds[state]
    # buffer the bounds by two degrees in hopes of capturing all polygons
    (west, south) = [x - 2 for x in (west, south)]
    (east, north) = [x + 2 for x in (east, north)]
    # create grids
    griddelta = 0.01
    lons = np.arange(west, east, griddelta)
    lats = np.arange(south, north, griddelta)
    YSZ = len(lats)
    XSZ = len(lons)
    lons, lats = np.meshgrid(lons, lats)
    affine = Affine(griddelta, 0.0, west, 0.0, 0 - griddelta, north)
    ones = np.ones((int(YSZ), int(XSZ)))
    counts = np.zeros((int(YSZ), int(XSZ)))
    wfolimiter = ""
    if ctx["t"] == "cwa":
        wfolimiter = " wfo = '%s' and " % (station, )
    # do arbitrary buffer to prevent segfaults?
    df = read_postgis(
        """
    SELECT ST_Forcerhr(ST_Buffer(geom, 0.0005)) as geom, issue, expire
     from sbw where """ + wfolimiter + """
     phenomena = %s and status = 'NEW' and significance = %s
     and ST_Within(geom, ST_GeomFromEWKT('SRID=4326;POLYGON((%s %s, %s %s,
     %s %s, %s %s, %s %s))')) and ST_IsValid(geom)
     and issue >= %s and issue <= %s ORDER by issue ASC
    """,
        pgconn,
        params=(
            phenomena,
            significance,
            west,
            south,
            west,
            north,
            east,
            north,
            east,
            south,
            west,
            south,
            sts,
            ets,
        ),
        geom_col="geom",
        index_col=None,
    )
    # print df, sts, ets, west, east, south, north
    zs = zonal_stats(
        df["geom"],
        ones,
        affine=affine,
        nodata=-1,
        all_touched=True,
        raster_out=True,
    )
    for i, z in enumerate(zs):
        aff = z["mini_raster_affine"]
        mywest = aff.c
        mynorth = aff.f
        raster = np.flipud(z["mini_raster_array"])
        x0 = int((mywest - west) / griddelta)
        y1 = int((mynorth - south) / griddelta)
        dy, dx = np.shape(raster)
        x1 = x0 + dx
        y0 = y1 - dy
        if x0 < 0 or x1 >= XSZ or y0 < 0 or y1 >= YSZ:
            # print raster.mask.shape, west, x0, x1, XSZ, north, y0, y1, YSZ
            continue
        if varname == "lastyear":
            counts[y0:y1, x0:x1] = np.where(raster.mask, counts[y0:y1, x0:x1],
                                            df.iloc[i]["issue"].year)
        else:
            counts[y0:y1, x0:x1] += np.where(raster.mask, 0, 1)
    if np.max(counts) == 0:
        raise NoDataFound("Sorry, no data found for query!")
    # construct the df
    ctx["df"] = pd.DataFrame({
        "lat": lats.ravel(),
        "lon": lons.ravel(),
        "val": counts.ravel()
    })
    minv = df["issue"].min()
    maxv = df["issue"].max()
    if varname == "lastyear":
        ctx["title"] = "Year of Last"
        if (maxv.year - minv.year) < 3:
            bins = range(int(minv.year) - 4, int(maxv.year) + 2)
        else:
            bins = range(int(minv.year), int(maxv.year) + 2)
    elif varname == "yearcount":
        ctx["title"] = "Count for %s" % (year, )
    elif varname == "total":
        ctx["title"] = "Total"
        ctx["subtitle"] = (" between %s and %s UTC") % (
            sdate.strftime("%d %b %Y %H%M"),
            edate.strftime("%d %b %Y %H%M"),
        )
    elif varname == "yearavg":
        ctx["title"] = ("Yearly Avg: %s and %s") % (
            minv.strftime("%d %b %Y"),
            maxv.strftime("%d %b %Y"),
        )
        years = (maxv.year - minv.year) + 1
        counts = counts / years

    maxv = np.max(counts)
    if varname != "lastyear":
        if varname == "total":
            if maxv < 8:
                bins = np.arange(1, 8, 1)
            else:
                bins = np.linspace(1, maxv + 3, 10, dtype="i")
        else:
            for delta in [500, 50, 5, 1, 0.5, 0.05]:
                bins = np.arange(0, (maxv + 1.0) * 1.05, delta)
                if len(bins) > 8:
                    break
            bins[0] = 0.01
    ctx["bins"] = bins
    ctx["data"] = counts
    ctx["lats"] = lats
    ctx["lons"] = lons
Example #46
from __future__ import print_function
import os
import datetime
import sys
import re

import requests
import pandas as pd
from pandas.io.sql import read_sql
import numpy as np
from pyiem.reference import TRACE_VALUE
from pyiem.datatypes import temperature, distance
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn, exponential_backoff

PGCONN = get_dbconn('coop')

BASEDIR = "/mesonet/tmp"
BASE = datetime.date(1850, 1, 1)
TODAY = datetime.date.today()

STCONV = {
    'WA': '45',
    'DE': '07',
    'DC': '18',
    'WI': '47',
    'WV': '46',
    'HI': '51',
    'FL': '08',
    'WY': '48',
    'NH': '27',
Example #47
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('coop')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['station']
    ab = ctx['_nt'].sts[station]['archive_begin']
    if ab is None:
        raise NoDataFound("Unknown station metadata.")
    year1 = ctx.get('year1')
    year2 = ctx.get('year2')
    year3 = ctx.get('year3')
    sdate = ctx['sdate']
    table = "alldata_%s" % (station[:2], )
    delta = 1 if sdate.month > 6 else 0
    df = read_sql("""
        WITH years as (
            SELECT distinct year + %s as myyear from """ + table + """
            WHERE station = %s and sday = %s),
        obs as (
            SELECT day, precip,
            case when sday >= %s then year + %s else year end as year
            from """ + table + """ WHERE station = %s and precip is not null
        )
        SELECT day, year, precip,
        row_number() OVER (PARTITION by year ORDER by day ASC) as row,
        sum(precip) OVER (PARTITION by year ORDER by day ASC) as accum from
        obs WHERE year in (select myyear from years)
        ORDER by day ASC
    """,
                  pgconn,
                  params=(delta, station, sdate.strftime("%m%d"),
                          sdate.strftime("%m%d"), delta, station),
                  index_col='day')
    if df.empty:
        raise NoDataFound("No data found!")

    (fig, ax) = plt.subplots(1, 1)
    # Average
    jday = df[['row', 'accum']].groupby('row').mean()
    jday['accum'].values[-1] = jday['accum'].values[-2]
    ax.plot(range(1,
                  len(jday.index) + 1),
            jday['accum'],
            lw=2,
            zorder=5,
            color='k',
            label='Average - %.2f' % (jday['accum'].iloc[-1], ))

    # Min and Max
    jmin = df[['row', 'accum']].groupby('row').min()
    jmax = df[['row', 'accum']].groupby('row').max()
    ax.fill_between(range(1,
                          len(jday.index) + 1),
                    jmin['accum'],
                    jmax['accum'],
                    zorder=2,
                    color='tan')

    # find max year
    plotted = []
    for year, color in zip([
            df['accum'].idxmax().year,
            df[df['row'] == 365]['accum'].idxmin().year, year1, year2, year3
    ], ['b', 'brown', 'r', 'g', 'purple']):
        if year is None or year in plotted:
            continue
        plotted.append(year)
        df2 = df[df['year'] == year]
        ax.plot(range(1,
                      len(df2.index) + 1),
                df2['accum'],
                label='%s - %.2f' % (year, df2['accum'].iloc[-1]),
                color=color,
                lw=2)

    ax.set_title(
        ("Accumulated Precipitation after %s\n"
         "[%s] %s (%s-%s)") %
        (sdate.strftime("%-d %B"), station, ctx['_nt'].sts[station]['name'],
         ab.year, datetime.date.today().year))
    ax.set_ylabel("Precipitation [inch]")
    ax.grid(True)
    ax.legend(loc=2)
    xticks = []
    xticklabels = []
    for i in range(366):
        date = sdate + datetime.timedelta(days=i)
        if date.day != 1:
            continue
        xticks.append(i)
        xticklabels.append(date.strftime("%b"))
    ax.set_xlim(0, 367)
    ax.set_ylim(bottom=-0.1)
    ax.set_xticks(xticks)
    ax.set_xticklabels(xticklabels)

    return fig, df
Example #48
def do_ugc(ctx):
    """Do UGC based logic."""
    pgconn = get_dbconn("postgis")
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    varname = ctx["v"]
    station = ctx["station"][:4]
    state = ctx["state"]
    phenomena = ctx["phenomena"]
    significance = ctx["significance"]
    t = ctx["t"]
    sdate = ctx["sdate"]
    edate = ctx["edate"]
    year = ctx["year"]
    year2 = ctx["year2"]
    if varname == "lastyear":
        if t == "cwa":
            cursor.execute(
                """
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """,
                (
                    station if len(station) == 3 else station[1:],
                    phenomena,
                    significance,
                ),
            )
        else:
            cursor.execute(
                """
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """,
                (state, phenomena, significance),
            )
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(valid=row[1], year=row[1].year, ugc=row[0]))
            data[row[0]] = row[1].year
        ctx["title"] = "Year of Last"
        datavar = "year"
    elif varname == "yearcount":
        table = "warnings_%s" % (year, )
        if t == "cwa":
            cursor.execute(
                """
            select ugc, count(*) from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """,
                (
                    station if len(station) == 3 else station[1:],
                    phenomena,
                    significance,
                ),
            )
        else:
            cursor.execute(
                """
            select ugc, count(*) from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """,
                (state, phenomena, significance),
            )
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(count=row[1], year=year, ugc=row[0]))
            data[row[0]] = row[1]
        ctx["title"] = "Count for %s" % (year, )
        datavar = "count"
    elif varname == "total":
        if t == "cwa":
            cursor.execute(
                """
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from warnings
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s
            GROUP by ugc
            """,
                (
                    station if len(station) == 3 else station[1:],
                    phenomena,
                    significance,
                    sdate,
                    edate,
                ),
            )
        else:
            cursor.execute(
                """
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from warnings
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """,
                (state, phenomena, significance, sdate, edate),
            )
        rows = []
        data = {}
        for row in cursor:
            rows.append(
                dict(
                    count=row[1],
                    year=year,
                    ugc=row[0],
                    minissue=row[2],
                    maxissue=row[3],
                ))
            data[row[0]] = row[1]
        ctx["title"] = "Total"
        ctx["subtitle"] = (" between %s and %s UTC") % (
            sdate.strftime("%d %b %Y %H%M"),
            edate.strftime("%d %b %Y %H%M"),
        )
        datavar = "count"
    elif varname == "hour":
        cursor.execute(
            """
        WITH data as (
        SELECT ugc, issue at time zone tzname as v
        from warnings w JOIN stations t
        ON (w.wfo =
            (case when length(t.id) = 4 then substr(t.id, 1, 3) else t.id end))
        WHERE t.network = 'WFO' and
        phenomena = %s and significance = %s and issue >= %s and issue < %s),
        agg as (
            SELECT ugc, extract(hour from v) as hr, count(*) from data
            GROUP by ugc, hr),
        ranks as (
            SELECT ugc, hr, rank() OVER (PARTITION by ugc ORDER by count DESC)
            from agg)

        SELECT ugc, hr from ranks where rank = 1
        """,
            (phenomena, significance, sdate, edate),
        )
        rows = []
        data = {}
        ctx["labels"] = {}
        midnight = datetime.datetime(2000, 1, 1)
        for row in cursor:
            rows.append(dict(hour=int(row[1]), ugc=row[0]))
            data[row[0]] = row[1]
            ctx["labels"][row[0]] = (
                midnight + datetime.timedelta(hours=row[1])).strftime("%-I %p")
        ctx["title"] = ("Most Freq. Issue Hour: %s and %s") % (
            sdate.strftime("%d %b %Y"),
            edate.strftime("%d %b %Y"),
        )
        datavar = "hour"
    elif varname == "yearavg":
        if t == "cwa":
            cursor.execute(
                """
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from warnings
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s
            GROUP by ugc
            """,
                (
                    station if len(station) == 3 else station[1:],
                    phenomena,
                    significance,
                    datetime.date(year, 1, 1),
                    datetime.date(year2 + 1, 1, 1),
                ),
            )
        else:
            cursor.execute(
                """
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from warnings
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """,
                (
                    state,
                    phenomena,
                    significance,
                    datetime.date(year, 1, 1),
                    datetime.date(year2 + 1, 1, 1),
                ),
            )
        rows = []
        data = {}
        minv = datetime.datetime(2050, 1, 1)
        maxv = datetime.datetime(1986, 1, 1)
        for row in cursor:
            if row[2] < minv:
                minv = row[2]
            if row[3] > maxv:
                maxv = row[3]
            rows.append(
                dict(
                    count=row[1],
                    year=year,
                    ugc=row[0],
                    minissue=row[2],
                    maxissue=row[3],
                ))
            data[row[0]] = row[1]
        ctx["title"] = ("Yearly Avg: %s and %s") % (
            minv.strftime("%d %b %Y"),
            maxv.strftime("%d %b %Y"),
        )
        datavar = "average"

    if not rows:
        raise NoDataFound("Sorry, no data found for query!")
    df = pd.DataFrame(rows)
    if varname == "yearavg":
        years = maxv.year - minv.year + 1
        df["average"] = df["count"] / years
        for key in data:
            data[key] = round(data[key] / float(years), 2)
        maxv = df[datavar].max()
        for delta in [500, 50, 5, 1, 0.5, 0.05]:
            bins = np.arange(0, (maxv + 1.0) * 1.05, delta)
            if len(bins) > 8:
                break
        if len(bins) > 8:
            bins = bins[::int(len(bins) / 8.0)]
        bins[0] = 0.01
    elif varname == "hour":
        bins = list(range(0, 25))
    else:
        bins = list(
            range(np.min(df[datavar][:]),
                  np.max(df[datavar][:]) + 2, 1))
        if len(bins) < 3:
            bins.append(bins[-1] + 1)
        if len(bins) > 8:
            bins = np.linspace(
                np.min(df[datavar][:]),
                np.max(df[datavar][:]) + 2,
                8,
                dtype="i",
            )
    ctx["bins"] = bins
    ctx["data"] = data
    ctx["df"] = df
Example #49
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('asos')
    ctx = get_autoplot_context(fdict, get_description())

    station = ctx['zstation']
    network = ctx['network']
    syear = ctx['syear']
    eyear = ctx['eyear']
    groupby = ctx['groupby']
    sts = datetime.date(syear, 1, 1)
    ets = datetime.date(eyear + 1, 1, 1)
    nt = NetworkTable(network)
    code = ctx['code']
    if code == 'PSN':
        code = "+SN"
        PDICT['+SN'] = PDICT['PSN']

    if groupby == 'week':
        data = np.ma.zeros((24, 52), 'f')
        df = read_sql("""
        WITH data as (
            SELECT valid at time zone %s + '10 minutes'::interval as v
            from alldata where
            station = %s and
            array_to_string(wxcodes, '') LIKE '%%""" + code + """%%'
            and valid > %s and valid < %s),
        agg as (
            SELECT distinct extract(week from v)::int as week,
            extract(doy from v)::int as doy,
            extract(year from v)::int as year,
            extract(hour from v)::int as hour
            from data)
        SELECT week, year, hour, count(*) from agg
        WHERE week < 53
        GROUP by week, year, hour
        """,
                      pgconn,
                      params=(nt.sts[station]['tzname'], station, sts, ets),
                      index_col=None)
    else:
        data = np.ma.zeros((24, 366), 'f')
        df = read_sql("""
        WITH data as (
            SELECT valid at time zone %s + '10 minutes'::interval as v
            from alldata where
            station = %s and
            array_to_string(wxcodes, '') LIKE '%%""" + code + """%%'
            and valid > %s and valid < %s),
        agg as (
            SELECT distinct
            extract(doy from v)::int as doy,
            extract(year from v)::int as year,
            extract(hour from v)::int as hour
            from data)
        SELECT doy, year, hour, count(*) from agg
        GROUP by doy, year, hour
        """,
                      pgconn,
                      params=(nt.sts[station]['tzname'], station, sts, ets),
                      index_col=None)
    if df.empty:
        raise ValueError("No data was found, sorry!")

    minyear = df['year'].min()
    maxyear = df['year'].max()
    for _, row in df.iterrows():
        data[row['hour'], row[groupby] - 1] += 1

    data.mask = np.where(data == 0, True, False)
    fig = plt.figure(figsize=(8, 6))
    ax = plt.axes([0.11, 0.25, 0.7, 0.65])
    cax = plt.axes([0.82, 0.04, 0.02, 0.15])

    res = ax.imshow(data,
                    aspect='auto',
                    rasterized=True,
                    interpolation='nearest')
    fig.colorbar(res, cax=cax)
    xloc = plt.MaxNLocator(4)
    cax.yaxis.set_major_locator(xloc)
    cax.set_ylabel("Count")
    ax.set_ylim(-0.5, 23.5)
    ax.set_yticks((0, 4, 8, 12, 16, 20))
    ax.set_ylabel("Local Time, %s" % (nt.sts[station]['tzname'], ))
    ax.set_yticklabels(('Mid', '4 AM', '8 AM', 'Noon', '4 PM', '8 PM'))
    ax.set_title(("[%s] %s %s Reports\n[%.0f - %.0f]"
                  " by hour and %s") %
                 (station, nt.sts[station]['name'], PDICT[code], minyear,
                  maxyear, PDICT2[groupby].replace("group ", "")))
    ax.grid(True)
    lax = plt.axes([0.11, 0.1, 0.7, 0.15])
    if groupby == 'week':
        ax.set_xticks(np.arange(0, 55, 7))
        lax.bar(np.arange(0, 52), np.ma.sum(data, 0), facecolor='tan')
        lax.set_xlim(-0.5, 51.5)
        lax.set_xticks(np.arange(0, 55, 7))
        lax.set_xticklabels(('Jan 1', 'Feb 19', 'Apr 8', 'May 27', 'Jul 15',
                             'Sep 2', 'Oct 21', 'Dec 9'))
    else:
        ax.set_xticks(
            [1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 365])
        lax.bar(np.arange(0, 366), np.ma.sum(data, 0), facecolor='tan')
        lax.set_xlim(-0.5, 365.5)
        lax.set_xticks(
            [1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 365])
        lax.set_xticklabels(calendar.month_abbr[1:])
    plt.setp(ax.get_xticklabels(), visible=False)

    # Bottom grid
    lax.grid(True)
    yloc = plt.MaxNLocator(3)
    lax.yaxis.set_major_locator(yloc)
    lax.yaxis.get_major_ticks()[-1].label1.set_visible(False)

    # Right grid
    rax = plt.axes([0.81, 0.25, 0.15, 0.65])
    rax.barh(np.arange(0, 24) - 0.4, np.ma.sum(data, 1), facecolor='tan')
    rax.set_ylim(-0.5, 23.5)
    rax.set_yticks([])
    xloc = plt.MaxNLocator(3)
    rax.xaxis.set_major_locator(xloc)
    rax.xaxis.get_major_ticks()[0].label1.set_visible(False)
    rax.grid(True)

    return fig, df
Example #50
def run(wfo, year, phenomena, significance, etn):
    """Do great things"""
    pgconn = get_dbconn('postgis')
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    table = "warnings_%s" % (year,)
    # This is really a BUG here and we need to rearch the database
    cursor.execute("""
    SELECT
    first_value(report) OVER (ORDER by product_issue ASC) as report,
    first_value(svs) OVER (ORDER by product_issue ASC) as svs_updates,
    first_value(issue at time zone 'UTC')
        OVER (ORDER by issue ASC NULLS LAST) as utc_issue,
    first_value(expire at time zone 'UTC')
        OVER (ORDER by expire DESC NULLS LAST) as utc_expire
    from """+table+""" w
    WHERE w.wfo = %s and eventid = %s and
    phenomena = %s and significance = %s
    """, (wfo, etn, phenomena, significance))
    res = {
        'generation_time': datetime.datetime.utcnow().strftime(ISO9660),
        'year': year,
        'phenomena': phenomena,
        'significance': significance,
        'etn': etn,
        'wfo': wfo
        }
    if cursor.rowcount == 0:
        return json.dumps(res)

    row = cursor.fetchone()
    res['report'] = {'text': row['report']}
    res['svs'] = []
    if row['svs_updates'] is not None:
        for token in row['svs_updates'].split("__"):
            if token.strip() != '':
                res['svs'].append({'text': token})
    res['utc_issue'] = row['utc_issue'].strftime(ISO9660)
    res['utc_expire'] = row['utc_expire'].strftime(ISO9660)

    # Now lets get UGC information
    cursor.execute("""
    SELECT
    u.ugc,
    u.name,
    w.status,
    w.product_issue at time zone 'UTC' utc_product_issue,
    w.issue at time zone 'UTC' utc_issue,
    w.expire at time zone 'UTC' utc_expire,
    w.init_expire at time zone 'UTC' utc_init_expire,
    w.updated at time zone 'UTC' utc_updated
    from """+table+""" w JOIN ugcs u on (w.gid = u.gid)
    WHERE w.wfo = %s and eventid = %s and
    phenomena = %s and significance = %s
    ORDER by u.ugc ASC
    """, (wfo, etn, phenomena, significance))
    res['ugcs'] = []
    for row in cursor:
        res['ugcs'].append({
            'ugc': row['ugc'],
            'name': row['name'],
            'status': row['status'],
            'utc_product_issue': row['utc_product_issue'].strftime(ISO9660),
            'utc_issue': row['utc_issue'].strftime(ISO9660),
            'utc_init_expire': row['utc_init_expire'].strftime(ISO9660),
            'utc_expire': row['utc_expire'].strftime(ISO9660),
            'utc_updated': row['utc_updated'].strftime(ISO9660),
            })

    return json.dumps(res)
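
A hypothetical invocation of this VTEC service function; the event parameters
are illustrative only:

import json

meta = json.loads(run("DMX", 2017, "TO", "W", 12))
print(meta["wfo"], meta.get("utc_issue"))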
Example #51
import mx.DateTime
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn

nt = NetworkTable(('IACLIMATE', 'ILCLIMATE', 'INCLIMATE', 'OHCLIMATE',
                   'MICLIMATE', 'KYCLIMATE', 'WICLIMATE', 'MNCLIMATE',
                   'SDCLIMATE', 'NDCLIMATE', 'NECLIMATE', 'KSCLIMATE',
                   'MOCLIMATE'))

_THISYEAR = mx.DateTime.now().year
_ENDYEAR = mx.DateTime.now().year + 1

_ARCHIVEENDTS = mx.DateTime.now() - mx.DateTime.RelativeDateTime(days=1)
_ENDTS = mx.DateTime.DateTime(_ENDYEAR, 1, 1)

mesosite = get_dbconn('mesosite', user='******')
mcursor = mesosite.cursor()
mcursor.execute("""
    SELECT propvalue from properties where propname = 'iaclimate.end'
""")
row = mcursor.fetchone()
_QCENDTS = mx.DateTime.strptime(row[0], '%Y-%m-%d')
mcursor.close()


def get_table(sid):
    """
    Return the table which has the data for this siteID
    """
    return "alldata_%s" % (sid[:2],)
Example #52
"""Figure out if we have new daily records set"""
from __future__ import print_function
import sys
import json
import datetime

import psycopg2.extras
from pyiem.util import get_dbconn

COOP = get_dbconn('coop')
ccursor = COOP.cursor(cursor_factory=psycopg2.extras.DictCursor)
ccursor2 = COOP.cursor()

sts = datetime.datetime(int(sys.argv[1]), int(sys.argv[2]), 1)
ets = (sts + datetime.timedelta(days=35)).replace(day=1)

cnt = {
    'climate': {
        'max_high': 0,
        'min_high': 0,
        'max_low': 0,
        'min_low': 0,
        'max_precip': 0
    },
    'climate51': {
        'max_high': 0,
        'min_high': 0,
        'max_low': 0,
        'min_low': 0,
        'max_precip': 0
    }
Example #53
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn("postgis")
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["station"][:4]
    ctx["_nt"].sts["_ALL"] = {"name": "All Offices"}

    fig = plt.figure(figsize=(8, 14 if station != "_ALL" else 21))
    ax = [None, None]
    ax[0] = plt.axes([0.1, 0.75, 0.85, 0.2])
    ax[1] = plt.axes([0.1, 0.05, 0.85, 0.65])

    if station == "_ALL":
        df = read_sql(
            """
            SELECT distinct extract(year from issue) as year,
                phenomena, significance from warnings WHERE
                phenomena is not null and significance is not null and
                issue > '2005-01-01'
            """,
            pgconn,
            index_col=None,
        )
    else:
        df = read_sql(
            """
            SELECT distinct extract(year from issue) as year,
            phenomena, significance from warnings WHERE
            wfo = %s and phenomena is not null and significance is not null
            and issue > '2005-01-01'
            """,
            pgconn,
            params=(station, ),
            index_col=None,
        )
    if df.empty:
        raise NoDataFound("No data was found for this WFO.")
    df["wfo"] = station
    df["year"] = df["year"].astype("i")
    gdf = df.groupby("year").count()

    ax[0].bar(gdf.index.values,
              gdf["wfo"],
              width=0.8,
              fc="b",
              ec="b",
              align="center")
    for yr, row in gdf.iterrows():
        ax[0].text(yr, row["wfo"] + 1, "%s" % (row["wfo"], ), ha="center")
    ax[0].set_title(("[%s] NWS %s\nCount of Distinct VTEC Phenomena/"
                     "Significance - %i to %i") % (
                         station,
                         ctx["_nt"].sts[station]["name"],
                         df["year"].min(),
                         df["year"].max(),
                     ))
    ax[0].grid()
    ax[0].set_ylabel("Count")
    ax[0].set_xlim(gdf.index.values.min() - 0.5, gdf.index.values.max() + 0.5)

    pos = {}
    i = 1
    df.sort_values(["phenomena", "significance"], inplace=True)
    for _, row in df.iterrows():
        key = "%s.%s" % (row["phenomena"], row["significance"])
        if key not in pos:
            pos[key] = i
            i += 1
        ax[1].text(
            row["year"],
            pos[key],
            key,
            ha="center",
            va="center",
            fontsize=10,
            bbox=dict(color="white"),
        )

    ax[1].set_title("VTEC <Phenomena.Significance> Issued by Year")
    ax[1].set_ylim(0, i)
    ax[1].grid(True)
    ax[1].set_xlim(gdf.index.values.min() - 0.5, gdf.index.values.max() + 0.5)
    return fig, df
Example #54
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn('iem')
    ctx = get_autoplot_context(fdict, get_description())
    varname = ctx['var']
    sector = ctx['sector']
    state = ctx['state']
    wfo = ctx['wfo']

    today = ctx['sdate']
    yesterday = today - datetime.timedelta(days=1)
    d180 = today - datetime.timedelta(days=180)

    df = read_sql("""
     with obs as (
      select station, valid,
      (case when low > low_normal then 1 else 0 end) as low_hit,
      (case when high > high_normal then 1 else 0 end) as high_hit,
      (case when precip >= 0.01 then 1 else 0 end) as precip_hit
      from cli_data
      where high is not null
      and high_normal is not null and low is not null and
      low_normal is not null
      and valid > %s and valid <= %s),

      totals as (
      SELECT station,
      max(case when low_hit = 0 then valid else %s end) as last_low_below,
      max(case when low_hit = 1 then valid else %s end) as last_low_above,
      max(case when high_hit = 0 then valid else %s end) as last_high_below,
      max(case when high_hit = 1 then valid else %s end) as last_high_above,
      max(case when precip_hit = 0 then valid else %s end) as last_dry,
      max(case when precip_hit = 1 then valid else %s end) as last_wet,
      count(*) as count from obs GROUP by station)

      SELECT station, last_low_below, last_low_above, last_high_below,
      last_high_above, last_dry, last_wet
      from totals where count > 170
    """,
                  pgconn,
                  params=(d180, today, d180, d180, d180, d180, d180, d180),
                  index_col='station')
    if df.empty:
        raise NoDataFound("No Data Found.")

    lats = []
    lons = []
    vals = []
    colors = []
    labels = []
    df['precip_days'] = (df['last_dry'] - df['last_wet']).dt.days
    df['low_days'] = (df['last_low_above'] - df['last_low_below']).dt.days
    df['high_days'] = (df['last_high_above'] - df['last_high_below']).dt.days
    # reorder the frame so that the largest values come first
    df = df.reindex(df[varname +
                       '_days'].abs().sort_values(ascending=False).index)

    for station, row in df.iterrows():
        if station not in ctx['_nt'].sts:
            continue
        lats.append(ctx['_nt'].sts[station]['lat'])
        lons.append(ctx['_nt'].sts[station]['lon'])
        if varname == 'precip':
            last_wet = row['last_wet']
            days = 0 if last_wet in [today, yesterday] else row['precip_days']
        else:
            days = row[varname + '_days']
        vals.append(days)
        colors.append('r' if days > 0 else 'b')
        labels.append(station[1:])

    title = ('Consecutive Days with %s Temp '
             'above(+)/below(-) Average') % (varname.capitalize(), )
    if varname == 'precip':
        title = 'Days Since Last Measurable Precipitation'
    mp = MapPlot(sector=sector,
                 state=state,
                 cwa=(wfo if len(wfo) == 3 else wfo[1:]),
                 axisbg='tan',
                 statecolor='#EEEEEE',
                 title=title,
                 subtitle=('based on NWS CLI Sites, map approximately '
                           'valid for %s') % (today.strftime("%-d %b %Y"), ))
    mp.plot_values(lons,
                   lats,
                   vals,
                   color=colors,
                   labels=labels,
                   labeltextsize=(8 if sector != 'state' else 12),
                   textsize=(12 if sector != 'state' else 16),
                   labelbuffer=10)

    return mp.fig, df
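
# A minimal pandas sketch (synthetic dates, not IEM data) of the streak
# arithmetic above: subtracting the last "miss" date from the last "hit"
# date yields a signed day count for the current streak.
import pandas as pd

demo = pd.DataFrame({
    "last_wet": pd.to_datetime(["2018-09-28"]),
    "last_dry": pd.to_datetime(["2018-10-02"]),
})
demo["precip_days"] = (demo["last_dry"] - demo["last_wet"]).dt.days
print(demo["precip_days"].iloc[0])  # 4 -> four days since measurable rain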
Beispiel #55
0
def plotter(fdict):
    """ Go """
    ctx = util.get_autoplot_context(fdict, get_description())
    date = ctx['date']
    sector = ctx['sector']
    threshold = ctx['threshold']
    threshold_mm = distance(threshold, 'IN').value('MM')
    window_sts = date - datetime.timedelta(days=90)
    if window_sts.year != date.year:
        raise ValueError('Sorry, do not support multi-year plots yet!')

    # idx0 = iemre.daily_offset(window_sts)
    idx1 = iemre.daily_offset(date)
    ncfn = iemre.get_daily_mrms_ncname(date.year)
    ncvar = 'p01d'
    nc = util.ncopen(ncfn)
    if nc is None:
        raise ValueError("No data for that year, sorry.")

    # Get the state weight
    df = gpd.GeoDataFrame.from_postgis("""
    SELECT the_geom from states where state_abbr = %s
    """,
                                       util.get_dbconn('postgis'),
                                       params=(sector, ),
                                       index_col=None,
                                       geom_col='the_geom')
    czs = CachingZonalStats(iemre.MRMS_AFFINE)
    czs.gen_stats(
        np.zeros((nc.variables['lat'].size, nc.variables['lon'].size)),
        df['the_geom'])
    jslice = None
    islice = None
    for nav in czs.gridnav:
        # careful here as y is flipped in this context
        jslice = slice(nc.variables['lat'].size - (nav.y0 + nav.ysz),
                       nc.variables['lat'].size - nav.y0)
        islice = slice(nav.x0, nav.x0 + nav.xsz)

    grid = np.zeros((jslice.stop - jslice.start, islice.stop - islice.start))
    total = np.zeros((jslice.stop - jslice.start, islice.stop - islice.start))
    for i, idx in enumerate(range(idx1, idx1 - 90, -1)):
        total += nc.variables[ncvar][idx, jslice, islice]
        grid = np.where(np.logical_and(grid == 0, total > threshold_mm), i,
                        grid)
    lon = nc.variables['lon'][islice]
    lat = nc.variables['lat'][jslice]
    nc.close()

    mp = MapPlot(sector='state',
                 state=sector,
                 titlefontsize=14,
                 subtitlefontsize=12,
                 title=("NOAA MRMS Q3: Number of Recent Days "
                        "till Accumulating %s\" of Precip") % (threshold, ),
                 subtitle=("valid %s: based on per calendar day "
                           "estimated preciptation, GaugeCorr and "
                           "RadarOnly products") %
                 (date.strftime("%-d %b %Y"), ))
    x, y = np.meshgrid(lon, lat)
    cmap = plt.get_cmap('terrain')
    cmap.set_over('k')
    cmap.set_under('white')
    mp.pcolormesh(x, y, grid, np.arange(0, 81, 10), cmap=cmap, units='days')
    mp.drawcounties()
    mp.drawcities()

    return mp.fig
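
# A standalone numpy sketch of the backwards-accumulation loop above, on a
# toy two-cell grid with invented daily totals (index 0 is the newest day):
import numpy as np

daily = np.array([[0.0, 5.0], [2.0, 5.0], [9.0, 5.0]])
threshold = 10.0
grid = np.zeros(2)
total = np.zeros(2)
for i, day in enumerate(daily):
    total += day
    grid = np.where(np.logical_and(grid == 0, total > threshold), i, grid)
print(grid)  # [2. 2.] -> both cells first exceed 10 units two days back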
Beispiel #56
0
"""Legacy."""
from io import StringIO
import datetime

from paste.request import parse_formvars
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn
from pyiem.templates.iem import TEMPLATE

nt = NetworkTable("IACLIMATE")

COOP = get_dbconn("coop")
ccursor = COOP.cursor()


def weather_logic(month, high, low, rain, snow):
    """Do Something."""
    deltaT = high - low

    if month > 4 and month < 11:  # It is summer
        if deltaT >= 30:
            if rain == 0.00:
                return "Sunny!!"
            return "Mostly sunny w/ Rain!!"
        if deltaT >= 15:
            if rain == 0.00:
                return "Mostly Sunny!!"
            return "Partly Sunny w/ Rain!!"
        if rain == 0.00:
            return "Cloudy!!"
        return "Cloudy and rainy!!"
    # Months outside May-October are not classified above; fall back to a
    # neutral label (assumed placeholder) rather than returning None.
    return "Unknown"
Beispiel #57
0
def fetch_hourly(form, cols):
    ''' Return hourly data for the requested stations and dates '''
    pgconn = get_dbconn('isuag', user='******')
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    sts, ets = get_dates(form)
    stations = get_stations(form)
    delim = get_delimiter(form)
    if delim == 'tab':
        delim = '\t'
    elif delim == 'comma':
        delim = ','
    elif delim == 'space':
        delim = ' '

    if not cols:
        cols = ["station", "valid", "tmpf", "relh", "solar", "precip",
                "speed", "drct", "et", "soil04t", "soil12t", "soil24t",
                "soil50t",
                "soil12vwc", "soil24vwc", "soil50vwc"]
    else:
        cols.insert(0, 'valid')
        cols.insert(0, 'station')

    table = 'sm_hourly'
    sqlextra = ', null as bp_mb_qc, etalfalfa_qc '
    if form.getfirst('timeres') == '15minute':
        table = 'sm_15minute'
        sqlextra = ', bp_mb_qc, null as etalfalfa_qc'
    else:
        if "bp_mb" in cols:
            cols.remove('bp_mb')
    cursor.execute("""SELECT station, valid, tair_c_avg_qc, rh_qc,
    slrkw_avg_qc,
    rain_mm_tot_qc, ws_mps_s_wvt_qc, winddir_d1_wvt_qc,
    tsoil_c_avg_qc,
    t12_c_avg_qc, t24_c_avg_qc, t50_c_avg_qc, calc_vwc_12_avg_qc,
    calc_vwc_24_avg_qc, calc_vwc_50_avg_qc, lwmv_1_qc, lwmv_2_qc,
    lwmdry_1_tot_qc, lwmcon_1_tot_qc, lwmwet_1_tot_qc, lwmdry_2_tot_qc,
    lwmcon_2_tot_qc, lwmwet_2_tot_qc, bpres_avg_qc """ + sqlextra + """
    from """ + table + """
    WHERE valid >= '%s 00:00' and valid < '%s 00:00' and station in %s
    ORDER by valid ASC
    """ % (sts.strftime("%Y-%m-%d"), ets.strftime("%Y-%m-%d"),
           str(tuple(stations))))

    values = []

    for row in cursor:
        valid = row['valid']
        station = row['station']
        tmpf = (temperature(row['tair_c_avg_qc'], 'C').value('F')
                if row['tair_c_avg_qc'] is not None else -99)
        relh = row['rh_qc'] if row['rh_qc'] is not None else -99
        solar = row['slrkw_avg_qc'] if row['slrkw_avg_qc'] is not None else -99
        precip = (distance(row['rain_mm_tot_qc'], 'MM').value('IN')
                  if row['rain_mm_tot_qc'] is not None else -99)
        speed = (row['ws_mps_s_wvt_qc'] * 2.23  # m/s to mph (approx)
                 if row['ws_mps_s_wvt_qc'] is not None
                 else -99)
        drct = (row['winddir_d1_wvt_qc']
                if row['winddir_d1_wvt_qc'] is not None
                else -99)
        et = (distance(row['etalfalfa_qc'], 'MM').value('IN')
              if row['etalfalfa_qc'] is not None else -99)
        soil04t = (temperature(row['tsoil_c_avg_qc'], 'C').value('F')
                   if row['tsoil_c_avg_qc'] is not None else -99)
        soil12t = (temperature(row['t12_c_avg_qc'], 'C').value('F')
                   if row['t12_c_avg_qc'] is not None else -99)
        soil24t = (temperature(row['t24_c_avg_qc'], 'C').value('F')
                   if row['t24_c_avg_qc'] is not None else -99)
        soil50t = (temperature(row['t50_c_avg_qc'], 'C').value('F')
                   if row['t50_c_avg_qc'] is not None else -99)
        soil12vwc = (row['calc_vwc_12_avg_qc']
                     if row['calc_vwc_12_avg_qc'] is not None else -99)
        soil24vwc = (row['calc_vwc_24_avg_qc']
                     if row['calc_vwc_24_avg_qc'] is not None else -99)
        soil50vwc = (row['calc_vwc_50_avg_qc']
                     if row['calc_vwc_50_avg_qc'] is not None else -99)
        bp_mb = (row['bp_mb_qc']
                 if row['bp_mb_qc'] is not None else -99)

        values.append(dict(station=station,
                           valid=valid.strftime("%Y-%m-%d %H:%M"),
                           tmpf=tmpf, relh=relh, solar=solar, precip=precip,
                           speed=speed, drct=drct, et=et, soil04t=soil04t,
                           soil12t=soil12t, soil24t=soil24t, soil50t=soil50t,
                           soil12vwc=soil12vwc, soil24vwc=soil24vwc,
                           soil50vwc=soil50vwc,
                           lwmv_1=row['lwmv_1_qc'],
                           lwmv_2=row['lwmv_2_qc'],
                           lwmdry_1_tot=row['lwmdry_1_tot_qc'],
                           lwmcon_1_tot=row['lwmcon_1_tot_qc'],
                           lwmwet_1_tot=row['lwmwet_1_tot_qc'],
                           lwmdry_2_tot=row['lwmdry_2_tot_qc'],
                           lwmcon_2_tot=row['lwmcon_2_tot_qc'],
                           lwmwet_2_tot=row['lwmwet_2_tot_qc'],
                           bpres_avg=row['bpres_avg_qc'], bp_mb=bp_mb))
    return values, cols
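
# The unit conversions above, sketched with plain arithmetic so the -99
# missing-value sentinel is explicit (pyiem's temperature()/distance()
# helpers perform the same C-to-F and mm-to-inch math):
def celsius_to_f(val):
    """Celsius to Fahrenheit, or -99 when the observation is missing."""
    return val * 9.0 / 5.0 + 32.0 if val is not None else -99

def mm_to_inch(val):
    """Millimeters to inches, or -99 when the observation is missing."""
    return val / 25.4 if val is not None else -99

print(celsius_to_f(20.0))  # 68.0
print(mm_to_inch(25.4))    # 1.0
print(celsius_to_f(None))  # -99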
Beispiel #58
0
def fetch_daily(form, cols):
    ''' Return daily data for the requested stations and dates '''
    pgconn = get_dbconn('isuag', user='******')
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    sts, ets = get_dates(form)
    stations = get_stations(form)
    delim = get_delimiter(form)
    if delim == 'tab':
        delim = '\t'
    elif delim == 'comma':
        delim = ','
    elif delim == 'space':
        delim = ' '

    if not cols:
        cols = ["station", "valid", "high", "low", "rh_min", "rh", "rh_max",
                "gdd50", "solar",
                "precip", "sped", "gust", "et", "soil04t", "soil12t",
                "soil24t", "soil50t", "soil12vwc", "soil24vwc", "soil50vwc"]
    else:
        cols.insert(0, 'valid')
        cols.insert(0, 'station')

    sql = """
    --- Get the Daily Max/Min soil values
    WITH soils as (
      SELECT station, date(valid) as date, min(rh) as rh_min, avg(rh) as rh,
      max(rh) as rh_max,
      min(tsoil_c_avg_qc) as soil04tn, max(tsoil_c_avg_qc) as soil04tx,
      min(t12_c_avg_qc) as soil12tn, max(t12_c_avg_qc) as soil12tx,
      min(t24_c_avg_qc) as soil24tn, max(t24_c_avg_qc) as soil24tx,
      min(t50_c_avg_qc) as soil50tn, max(t50_c_avg_qc) as soil50tx
      from sm_hourly where
      valid >= '%s 00:00' and valid < '%s 00:00' and station in %s
      GROUP by station, date
    ), daily as (
      SELECT station, valid, tair_c_max_qc, tair_c_min_qc, slrmj_tot_qc,
      rain_mm_tot_qc, dailyet_qc, tsoil_c_avg_qc, t12_c_avg_qc, t24_c_avg_qc,
      t50_c_avg_qc, calc_vwc_12_avg_qc, calc_vwc_24_avg_qc, calc_vwc_50_avg_qc,
      ws_mps_s_wvt_qc, ws_mps_max_qc, lwmv_1_qc, lwmv_2_qc,
      lwmdry_1_tot_qc, lwmcon_1_tot_qc, lwmwet_1_tot_qc, lwmdry_2_tot_qc,
      lwmcon_2_tot_qc, lwmwet_2_tot_qc, bpres_avg_qc from sm_daily WHERE
      valid >= '%s 00:00' and valid < '%s 00:00' and station in %s
    )
    SELECT d.station, d.valid, s.date, s.soil04tn, s.soil04tx, s.rh,
    s.rh_min, s.rh_max,
    s.soil12tn, s.soil12tx, s.soil24tn, s.soil24tx,
    s.soil50tn, s.soil50tx, tair_c_max_qc, tair_c_min_qc, slrmj_tot_qc,
    rain_mm_tot_qc, dailyet_qc, tsoil_c_avg_qc, t12_c_avg_qc, t24_c_avg_qc,
    t50_c_avg_qc, calc_vwc_12_avg_qc, calc_vwc_24_avg_qc, calc_vwc_50_avg_qc,
    ws_mps_s_wvt_qc, ws_mps_max_qc, round(gddxx(50, 86, c2f( tair_c_max_qc ),
    c2f( tair_c_min_qc ))::numeric,1) as gdd50, lwmv_1_qc, lwmv_2_qc,
    lwmdry_1_tot_qc, lwmcon_1_tot_qc, lwmwet_1_tot_qc, lwmdry_2_tot_qc,
    lwmcon_2_tot_qc, lwmwet_2_tot_qc, bpres_avg_qc
    FROM soils s JOIN daily d on (d.station = s.station and s.date = d.valid)
    ORDER by d.valid ASC
    """ % (
      sts.strftime("%Y-%m-%d"), ets.strftime("%Y-%m-%d"), str(tuple(stations)),
      sts.strftime("%Y-%m-%d"), ets.strftime("%Y-%m-%d"), str(tuple(stations))
      )
    cursor.execute(sql)

    values = []

    for row in cursor:
        valid = row['valid']
        station = row['station']
        high = (temperature(row['tair_c_max_qc'], 'C').value('F')
                if row['tair_c_max_qc'] is not None else -99)
        low = (temperature(row['tair_c_min_qc'], 'C').value('F')
               if row['tair_c_min_qc'] is not None else -99)
        precip = (distance(row['rain_mm_tot_qc'], 'MM').value('IN')
                  if row['rain_mm_tot_qc'] is not None and
                  row['rain_mm_tot_qc'] > 0 else 0)
        et = (distance(row['dailyet_qc'], 'MM').value('IN')
              if row['dailyet_qc'] is not None and
              row['dailyet_qc'] > 0 else 0)

        soil04t = (temperature(row['tsoil_c_avg_qc'], 'C').value('F')
                   if row['tsoil_c_avg_qc'] is not None else -99)
        soil04tn = (temperature(row['soil04tn'], 'C').value('F')
                    if row['soil04tn'] is not None else -99)
        soil04tx = (temperature(row['soil04tx'], 'C').value('F')
                    if row['soil04tx'] is not None else -99)

        soil12t = (temperature(row['t12_c_avg_qc'], 'C').value('F')
                   if row['t12_c_avg_qc'] is not None else -99)
        soil12tn = (temperature(row['soil12tn'], 'C').value('F')
                    if row['soil12tn'] is not None else -99)
        soil12tx = (temperature(row['soil12tx'], 'C').value('F')
                    if row['soil12tx'] is not None else -99)

        soil24t = (temperature(row['t24_c_avg_qc'], 'C').value('F')
                   if row['t24_c_avg_qc'] is not None else -99)
        soil24tn = (temperature(row['soil24tn'], 'C').value('F')
                    if row['soil24tn'] is not None else -99)
        soil24tx = (temperature(row['soil24tx'], 'C').value('F')
                    if row['soil24tx'] is not None else -99)

        soil50t = (temperature(row['t50_c_avg_qc'], 'C').value('F')
                   if row['t50_c_avg_qc'] is not None else -99)
        soil50tn = (temperature(row['soil50tn'], 'C').value('F')
                    if row['soil50tn'] is not None else -99)
        soil50tx = (temperature(row['soil50tx'], 'C').value('F')
                    if row['soil50tx'] is not None else -99)

        soil12vwc = (row['calc_vwc_12_avg_qc']
                     if row['calc_vwc_12_avg_qc'] is not None
                     else -99)
        soil24vwc = (row['calc_vwc_24_avg_qc']
                     if row['calc_vwc_24_avg_qc'] is not None
                     else -99)
        soil50vwc = (row['calc_vwc_50_avg_qc']
                     if row['calc_vwc_50_avg_qc'] is not None
                     else -99)
        speed = (row['ws_mps_s_wvt_qc'] * 2.23  # m/s to mph (approx)
                 if row['ws_mps_s_wvt_qc'] is not None
                 else -99)
        gust = (row['ws_mps_max_qc'] * 2.23  # m/s to mph (approx)
                if row['ws_mps_max_qc'] is not None
                else -99)

        values.append(dict(station=station, valid=valid.strftime("%Y-%m-%d"),
                           high=high, low=low, solar=row['slrmj_tot_qc'],
                           rh=row['rh'], rh_min=row['rh_min'],
                           rh_max=row['rh_max'],
                           gdd50=row['gdd50'], precip=precip, sped=speed,
                           gust=gust, et=et, soil04t=soil04t, soil12t=soil12t,
                           soil24t=soil24t, soil50t=soil50t,
                           soil04tn=soil04tn, soil04tx=soil04tx,
                           soil12tn=soil12tn, soil12tx=soil12tx,
                           soil24tn=soil24tn, soil24tx=soil24tx,
                           soil50tn=soil50tn, soil50tx=soil50tx,
                           soil12vwc=soil12vwc, soil24vwc=soil24vwc,
                           soil50vwc=soil50vwc,
                           lwmv_1=row['lwmv_1_qc'],
                           lwmv_2=row['lwmv_2_qc'],
                           lwmdry_1_tot=row['lwmdry_1_tot_qc'],
                           lwmcon_1_tot=row['lwmcon_1_tot_qc'],
                           lwmwet_1_tot=row['lwmwet_1_tot_qc'],
                           lwmdry_2_tot=row['lwmdry_2_tot_qc'],
                           lwmcon_2_tot=row['lwmcon_2_tot_qc'],
                           lwmwet_2_tot=row['lwmwet_2_tot_qc'],
                           bpres_avg=row['bpres_avg_qc']))

    return values, cols
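
# How the station filter above binds: psycopg2 adapts a Python tuple to a
# SQL list, so "station in %s" needs no hand-built text. Shown as a
# fragment (hypothetical station ids; requires a live connection):
#   cursor.execute(
#       "SELECT station, valid from sm_daily WHERE station in %s",
#       (tuple(["AEEI4", "BOOI4"]), ),
#   )
#   -- renders as: WHERE station in ('AEEI4', 'BOOI4')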
Beispiel #59
0
"""Go through our data sheets and cleanup entries that don't exactly match
things that we would like to see"""
import pyiem.cscap_utils as util
from pyiem.util import get_dbconn
from pandas.io.sql import read_sql

pgconn = get_dbconn("sustainablecorn")
df = read_sql("SELECT * from plotids", pgconn, index_col=None)
df["key"] = df["uniqueid"] + "::" + df["plotid"]
df.set_index("key", inplace=True)

config = util.get_config()
spr_client = util.get_spreadsheet_client(config)
drive = util.get_driveclient(config)

# Fake last conditional to make it easy to reprocess one site...
res = (drive.files().list(q=("title contains 'Agronomic Data'"),
                          maxResults=999).execute())

HEADERS = [
    "uniqueid",
    "plotid",
    "depth",
    "tillage",
    "rotation",
    "soil6",
    "nitrogen",
    "drainage",
    "rep",
    "subsample",
    "landscape",
Beispiel #60
0
def plotter(fdict):
    """ Go """
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["station"]
    if station not in ctx["_nt"].sts:  # This is needed.
        raise NoDataFound("Unknown station metadata.")
    varname = ctx["var"]
    hour = int(ctx["hour"])
    month = ctx["month"]
    level = ctx["level"]
    agg = ctx["agg"]
    offset = 0
    if month == "all":
        months = range(1, 13)
    elif month == "fall":
        months = [9, 10, 11]
    elif month == "winter":
        months = [12, 1, 2]
        offset = 32
    elif month == "spring":
        months = [3, 4, 5]
    elif month == "mjj":
        months = [5, 6, 7]
    elif month == "summer":
        months = [6, 7, 8]
    else:
        ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
        # single-month request; tuple(months) below still binds correctly
        months = [ts.month]

    name = ctx["_nt"].sts[station]["name"]
    stations = [station]
    if station.startswith("_"):
        name = ctx["_nt"].sts[station]["name"].split("--")[0]
        stations = (
            ctx["_nt"].sts[station]["name"].split("--")[1].strip().split(" "))
    pgconn = get_dbconn("postgis")

    if varname in ["tmpc", "dwpc", "height", "smps"]:
        leveltitle = " @ %s hPa" % (level, )
        dfin = read_sql(
            """
            select extract(year from f.valid + '%s days'::interval) as year,
            avg(""" + varname + """) as avg_""" + varname + """,
            min(""" + varname + """) as min_""" + varname + """,
            max(""" + varname + """) as max_""" + varname + """,
            count(*)
            from raob_profile p JOIN raob_flights f on (p.fid = f.fid)
            WHERE f.station in %s and p.pressure = %s and
            extract(hour from f.valid at time zone 'UTC') = %s and
            extract(month from f.valid) in %s
            GROUP by year ORDER by year ASC
        """,
            pgconn,
            params=(offset, tuple(stations), level, hour, tuple(months)),
            index_col="year",
        )
    else:
        leveltitle = ""
        dfin = read_sql(
            """
            select extract(year from f.valid + '%s days'::interval) as year,
            count(*),
            avg(""" + varname + """) as avg_""" + varname + """,
            min(""" + varname + """) as min_""" + varname + """,
            max(""" + varname + """) as max_""" + varname + """
            from raob_flights f
            WHERE f.station in %s and
            extract(hour from f.valid at time zone 'UTC') = %s and
            extract(month from f.valid) in %s
            GROUP by year ORDER by year ASC
        """,
            pgconn,
            params=(offset, tuple(stations), hour, tuple(months)),
            index_col="year",
        )
    # require a quorum of observations: at least 75% of ~28 days per month
    df = dfin[dfin["count"] > ((len(months) * 28) * 0.75)]
    if df.empty:
        raise NoDataFound("No data was found!")
    colname = "%s_%s" % (agg, varname)
    fig, ax = plt.subplots(1, 1)
    avgv = df[colname].mean()
    bars = ax.bar(df.index.values, df[colname], align="center")
    for i, _bar in enumerate(bars):
        val = df.iloc[i][colname]
        if val < avgv:
            _bar.set_color("blue")
        else:
            _bar.set_color("red")
    ax.set_xlim(df.index.min() - 1, df.index.max() + 1)
    rng = df[colname].max() - df[colname].min()
    ax.set_ylim(df[colname].min() - rng * 0.1, df[colname].max() + rng * 0.1)
    ax.axhline(avgv, color="k")
    ax.text(df.index.values[-1] + 2, avgv, "Avg:\n%.1f" % (avgv, ))
    ax.set_xlabel("Year")
    ax.set_ylabel("%s %s%s" % (PDICT4[agg], PDICT3[varname], leveltitle))
    plt.gcf().text(
        0.5,
        0.9,
        ("%s %s %02i UTC Sounding\n"
         "%s %s%s over %s") % (
             station,
             name,
             hour,
             PDICT4[agg],
             PDICT3[varname],
             leveltitle,
             MDICT[month],
         ),
        ha="center",
        va="bottom",
    )
    ax.grid(True)

    return fig, df
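
# A standalone sketch of the winter "offset" trick above: shifting dates
# forward 32 days pushes December into January, so a Dec-Jan-Feb season
# groups under one year label. Dates are invented for illustration.
import datetime

for d in [datetime.date(2018, 12, 15), datetime.date(2019, 1, 15),
          datetime.date(2019, 2, 15)]:
    print(d, "->", (d + datetime.timedelta(days=32)).year)
# all three print 2019, the label for the 2018-19 winter season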