def plotter(fdict):
    """Map the latest RFC Flash Flood Guidance for the requested hour.

    Pulls the most recent FFG value per UGC (within the 24 hours prior
    to the requested timestamp) from the postgis database and renders
    filled county and zone polygons.

    Args:
        fdict (dict): autoplot context with keys ``ts``, ``hour``,
            ``ilabel``, ``t``, ``state``, ``wfo``.

    Returns:
        tuple: (matplotlib Figure, pandas.DataFrame) plot and raw data.
    """
    ctx = get_autoplot_context(fdict, get_description())
    # timestamps are interpreted as UTC
    ts = ctx['ts'].replace(tzinfo=pytz.utc)
    hour = int(ctx['hour'])
    ilabel = (ctx['ilabel'] == 'yes')
    # ffg table stores one column per guidance duration (hour01 .. hour24)
    column = "hour%02i" % (hour,)
    pgconn = get_dbconn('postgis')
    # rank = 1 keeps only the most recent valid row per UGC in the window
    df = read_sql("""
    WITH data as (
        SELECT ugc, rank() OVER (PARTITION by ugc ORDER by valid DESC),
        hour01, hour03, hour06, hour12, hour24
        from ffg WHERE valid >= %s and valid <= %s)
    SELECT *, substr(ugc, 3, 1) as ztype from data where rank = 1
    """, pgconn, params=(ts - datetime.timedelta(hours=24), ts),
                  index_col='ugc')
    plot = MapPlot(sector=ctx['t'], continentalcolor='white',
                   state=ctx['state'], cwa=ctx['wfo'],
                   title=("NWS RFC %s Hour Flash Flood Guidance on "
                          "%s UTC") % (hour, ts.strftime("%-d %b %Y %H")),
                   subtitle=("Estimated amount of %s Rainfall "
                             "needed for non-urban Flash Flooding to commence"
                             ) % (HOURS[ctx['hour']], ))
    # rainfall thresholds (inches)
    bins = [0.01, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.25, 2.5, 2.75,
            3., 3.5, 4.0, 5.0]
    cmap = plt.get_cmap('gist_rainbow_r')
    # fill counties ('C') and zones ('Z') in two passes; units is only
    # passed on the second call
    # NOTE(review): presumably so the colorbar label is drawn once --
    # confirm against MapPlot.fill_ugcs
    df2 = df[df['ztype'] == 'C']
    plot.fill_ugcs(df2[column].to_dict(), bins, cmap=cmap, plotmissing=False,
                   ilabel=ilabel)
    df2 = df[df['ztype'] == 'Z']
    plot.fill_ugcs(df2[column].to_dict(), bins, cmap=cmap, plotmissing=False,
                   units='inches', ilabel=ilabel)
    return plot.fig, df
def test_drawugcs2():
    """Fill three Iowa zones, with labels enabled."""
    values = {"IAZ001": 10, "IAZ003": 20, "IAZ005": 30}
    plot = MapPlot(
        sector='iowa',
        title='Zones, 3 filled in Iowa, label',
        subtitle='test_drawugcs2',
        nocaption=True,
    )
    plot.fill_ugcs(values, ilabel=True)
    return plot.fig
def plotter(fdict):
    """Map NWS watch/warning/advisory statistics.

    Renders either filled UGC polygons (``geo == 'ugc'``) or a gridded
    field via pcolormesh (``geo == 'polygon'``), for a whole state or
    a single WFO's CWA.

    Args:
        fdict (dict): autoplot context parameters.

    Returns:
        tuple: (matplotlib Figure, pandas.DataFrame)
    """
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from pyiem.plot import MapPlot
    ctx = get_autoplot_context(fdict, get_description())
    # Convert datetimes to UTC
    ctx['sdate'] = ctx['sdate'].replace(tzinfo=pytz.utc)
    ctx['edate'] = ctx['edate'].replace(tzinfo=pytz.utc)
    state = ctx['state']
    phenomena = ctx['phenomena']
    significance = ctx['significance']
    station = ctx['station'][:4]
    t = ctx['t']
    ilabel = (ctx['ilabel'] == 'yes')
    geo = ctx['geo']
    nt = NetworkTable("WFO")
    # NOTE(review): these helpers appear to mutate ctx in place, adding
    # 'data'/'bins' (ugc) or 'lons'/'lats'/'data'/'bins' (polygon) plus
    # 'df' and optionally 'subtitle'/'title' -- confirm in do_ugc/do_polygon
    if geo == 'ugc':
        do_ugc(ctx)
    elif geo == 'polygon':
        do_polygon(ctx)
    subtitle = "based on IEM Archives %s" % (ctx.get('subtitle', ''), )
    if t == 'cwa':
        subtitle = "Plotted for %s (%s), %s" % (nt.sts[station]['name'],
                                                station, subtitle)
    else:
        subtitle = "Plotted for %s, %s" % (state_names[state], subtitle)
    m = MapPlot(sector=('state' if t == 'state' else 'cwa'),
                state=state,
                cwa=(station if len(station) == 3 else station[1:]),
                axisbg='white',
                title=('%s %s (%s.%s)') % (
                    ctx['title'],
                    vtec.get_ps_string(phenomena, significance),
                    phenomena, significance),
                subtitle=subtitle, nocaption=True, titlefontsize=16)
    if geo == 'ugc':
        cmap = plt.get_cmap('Paired')
        cmap.set_under('white')
        cmap.set_over('white')
        m.fill_ugcs(ctx['data'], ctx['bins'], cmap=cmap, ilabel=ilabel)
    else:
        cmap = plt.get_cmap('jet')
        cmap.set_under('white')
        cmap.set_over('black')
        res = m.pcolormesh(ctx['lons'], ctx['lats'], ctx['data'],
                           ctx['bins'], cmap=cmap, units='count')
        # Cut down on SVG et al size
        res.set_rasterized(True)
    if ctx['drawc'] == 'yes':
        m.drawcounties()
    return m.fig, ctx['df']
def main():
    """Map 2011 SPC Tornado Watch counts by county."""
    conn = get_dbconn('postgis')
    counts = read_sql("""
    SELECT ugc, count(*) from warnings_2011 where phenomena = 'TO'
    and significance = 'A' and issue < '2011-05-18'
    GROUP by ugc ORDER by count DESC
    """, conn, index_col='ugc')
    mapplot = MapPlot(
        sector='nws',
        title=('1 Jan - 17 May 2011 Number of Storm Prediction Center'
               ' Tornado Watches by County'),
        subtitle=('count by county, based on unofficial archives '
                  'maintained by the IEM'))
    levels = list(range(0, 19, 2))
    levels[0] = 1
    print(counts['count'].max())
    cmap = plt.get_cmap('plasma')
    cmap.set_under('white')
    cmap.set_over('black')
    mapplot.fill_ugcs(counts['count'].to_dict(), levels, cmap=cmap)
    mapplot.postprocess(filename='test.png')
def plotter(fdict):
    """Map NWS watch/warning/advisory statistics by UGC.

    The ``v`` parameter selects the statistic: year of last issuance
    (``lastyear``), count for one year (``yearcount``), total over a
    date range (``total``), or yearly average over a span of years
    (``yearavg``), plotted statewide or for a single WFO's CWA.

    Args:
        fdict (dict): CGI-style parameter dictionary.

    Returns:
        tuple: (matplotlib Figure, pandas.DataFrame), or an apology
        string when the query found no rows.
    """
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from pyiem.plot import MapPlot
    pgconn = psycopg2.connect(database='postgis', host='iemdb',
                              user='******')
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    state = fdict.get('state', 'IA')
    phenomena = fdict.get('phenomena', 'TO')
    significance = fdict.get('significance', 'A')
    station = fdict.get('station', 'DMX')[:4]
    sdate = datetime.datetime.strptime(fdict.get('sdate', '2015-01-01'),
                                       '%Y-%m-%d')
    edate = datetime.datetime.strptime(fdict.get('edate', '2015-01-01'),
                                       '%Y-%m-%d')
    t = fdict.get('t', 'state')
    varname = fdict.get('v', 'lastyear')
    year = int(fdict.get('year', 2015))
    year2 = int(fdict.get('year2', 2015))
    ilabel = (fdict.get('ilabel', 'no') == 'yes')
    nt = NetworkTable("WFO")
    # database stores WFO identifiers as 3 characters
    wfo = station if len(station) == 3 else station[1:]
    # defaults so an unrecognized varname falls through to the
    # no-data return instead of raising NameError
    rows = []
    data = {}
    title = ""
    datavar = "count"
    if varname == 'lastyear':
        if t == 'cwa':
            cursor.execute("""
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """, (wfo, phenomena, significance))
        else:
            cursor.execute("""
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """, (state, phenomena, significance))
        for row in cursor:
            rows.append(dict(valid=row[1], year=row[1].year, ugc=row[0]))
            data[row[0]] = row[1].year
        title = "Year of Last"
        datavar = "year"
    elif varname == 'yearcount':
        table = "warnings_%s" % (year, )
        if t == 'cwa':
            cursor.execute("""
            select ugc, count(*) from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """, (wfo, phenomena, significance))
        else:
            cursor.execute("""
            select ugc, count(*) from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """, (state, phenomena, significance))
        for row in cursor:
            rows.append(dict(count=row[1], year=year, ugc=row[0]))
            data[row[0]] = row[1]
        title = "Count for %s" % (year, )
        datavar = "count"
    elif varname == 'total':
        if t == 'cwa':
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from warnings
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s GROUP by ugc
            """, (wfo, phenomena, significance, sdate, edate))
        else:
            # NOTE(review): this branch uses issue < edate while the cwa
            # branch uses <= -- preserved as-is, confirm intent
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from warnings
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """, (state, phenomena, significance, sdate, edate))
        for row in cursor:
            rows.append(dict(count=row[1], year=year, ugc=row[0],
                             minissue=row[2], maxissue=row[3]))
            data[row[0]] = row[1]
        title = "Total between %s and %s" % (sdate.strftime("%d %b %Y"),
                                             edate.strftime("%d %b %Y"))
        datavar = "count"
    elif varname == 'yearavg':
        if t == 'cwa':
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from warnings
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s GROUP by ugc
            """, (wfo, phenomena, significance,
                  datetime.date(year, 1, 1),
                  datetime.date(year2 + 1, 1, 1)))
        else:
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from warnings
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """, (state, phenomena, significance,
                  datetime.date(year, 1, 1),
                  datetime.date(year2 + 1, 1, 1)))
        # track the actual period of record found in the result set
        minv = datetime.datetime(2050, 1, 1)
        maxv = datetime.datetime(1986, 1, 1)
        for row in cursor:
            if row[2] < minv:
                minv = row[2]
            if row[3] > maxv:
                maxv = row[3]
            rows.append(dict(count=row[1], year=year, ugc=row[0],
                             minissue=row[2], maxissue=row[3]))
            data[row[0]] = row[1]
        title = ("Yearly Avg: %s and %s") % (minv.strftime("%d %b %Y"),
                                             maxv.strftime("%d %b %Y"))
        datavar = "average"
    if len(rows) == 0:
        return ("Sorry, no data found for query!")
    df = pd.DataFrame(rows)
    if varname == 'yearavg':
        years = maxv.year - minv.year + 1
        df['average'] = df['count'] / years
        for key in data:
            data[key] = round(data[key] / float(years), 2)
        # list() so bins can be appended below; under Python 3 a bare
        # range object has no append() and would raise TypeError
        bins = list(range(0, int(np.max(df[datavar][:])) + 2, 1))
    else:
        bins = list(range(np.min(df[datavar][:]),
                          np.max(df[datavar][:]) + 2, 1))
    if len(bins) < 3:
        bins.append(bins[-1] + 1)
    if len(bins) > 8:
        # too many discrete levels; use 8 evenly spaced integer bins
        bins = np.linspace(np.min(df[datavar][:]),
                           np.max(df[datavar][:]) + 2, 8, dtype='i')
    subtitle = "based on IEM Archives of NWS WWA"
    if t == 'cwa':
        subtitle = "Plotted for %s (%s), %s" % (nt.sts[station]['name'],
                                                station, subtitle)
    else:
        subtitle = "Plotted for %s, %s" % (state_names[state], subtitle)
    m = MapPlot(sector=('state' if t == 'state' else 'cwa'),
                state=state, cwa=wfo, axisbg='white',
                title=('%s %s %s (%s.%s)') % (title,
                                              vtec._phenDict[phenomena],
                                              vtec._sigDict[significance],
                                              phenomena, significance),
                subtitle=subtitle, nocaption=True, titlefontsize=16)
    cmap = plt.get_cmap('Paired')
    cmap.set_over('white')
    cmap.set_under('white')
    m.fill_ugcs(data, bins, cmap=cmap, ilabel=ilabel)
    return m.fig, df
GROUP by ugc), u as (SELECT ugc, ST_Area(ST_Transform(geom, 2163)) / 1000000. as area from ugcs where substr(ugc,3,1) = 'C' and end_ts is null) SELECT data.ugc, data.data, data.data / u.area from data JOIN u on (u.ugc = data.ugc) ''' pcursor.execute(""" WITH data as ( SELECT ugc, count(*) / %s as data from (select distinct ugc, generate_series(issue, expire, '1 minute'::interval) from warnings where phenomena in %s and significance = 'W' and ugc is not null and (expire - issue) < '1440 minutes'::interval and issue > %s and issue < %s) as foo2 GROUP by ugc), u as (SELECT ugc, ST_Area(ST_Transform(geom, 2163)) / 1000000. as area from ugcs where substr(ugc,3,1) = 'C' and end_ts is null) SELECT data.ugc, data.data, data.data / u.area from data JOIN u on (u.ugc = data.ugc) """, (opts['years'], opts['dbcols'], opts['sdate'], opts['edate'])) data = {} for row in pcursor: data[row[0]] = float(row[2 if opts['normalized'] else 1]) m.fill_ugcs(data, bins, cmap=cmap, units=opts['units']) m.postprocess(filename=fn) subprocess.call("xv %s" % (fn,), shell=True)
GROUP by ugc_county ORDER by count DESC """) data = {} for row in icursor: data[row[0]] = row[1] # Query out centroids of counties... pcursor.execute(""" SELECT ugc, ST_x(ST_centroid(geom)) as lon, ST_y(ST_centroid(geom)) as lat from ugcs WHERE state = 'IA' and end_ts is null and substr(ugc,3,1) = 'C' """) clons = [] clats = [] cvals = [] for row in pcursor: cvals.append(data.get(row[0], 0)) clats.append(row[2]) clons.append(row[1]) m = MapPlot(axisbg='white', title='Iowa CoCoRaHS Observers Per County', subtitle=("Sites with at least one report in past year " "(Sep 2015-2016)")) m.fill_ugcs(data, [1, 2, 3, 4, 5, 7, 10, 15, 20]) m.plot_values(clons, clats, cvals, labelbuffer=0) m.drawcounties() m.postprocess(filename='test.png')
# Map, per UGC, the hour of day with the most Severe Thunderstorm Warning
# issuances.
# NOTE(review): `m` (MapPlot), `cmap`, and `pcursor` are created earlier
# in this script -- not visible here, confirm before refactoring.
# 24 bins, one per local hour of the day
bins = np.arange(0, 25, 1)
norm = mpcolors.BoundaryNorm(bins, cmap.N)
# for each UGC, count SV.W issuances per local hour (issue shifted by the
# station's tzname) and keep the hour with the highest count (rank = 1)
pcursor.execute("""
WITH data as (
    SELECT ugc, issue at time zone tzname as v
    from warnings w JOIN stations t ON
    (w.wfo = (case when length(t.id) = 4 then substr(t.id, 1, 3)
              else t.id end))
    WHERE t.network = 'WFO' and phenomena = 'SV' and significance = 'W'
    and issue is not null),
agg as (
    SELECT ugc, extract(hour from v) as hr, count(*) from data
    GROUP by ugc, hr),
ranks as (
    SELECT ugc, hr, rank() OVER (PARTITION by ugc ORDER by count DESC)
    from agg)
SELECT ugc, hr from ranks where rank = 1
""")
data = {}
for row in pcursor:
    data[row[0]] = float(row[1])
# colorbar labels: every other tick labelled (clevstride=2), midnight first
cl = ['Mid', '', '2 AM', '', '4 AM', '', '6 AM', '', '8 AM', '', '10 AM',
      '', 'Noon', '', '2 PM', '', '4 PM', '', '6 PM', '', '8 PM', '',
      '10 PM', '']
m.fill_ugcs(data, bins, cmap=cmap, units='Hour of Day', clevstride=2,
            clevlabels=cl)
m.postprocess(filename='test.png')
def plotter(fdict):
    """Map RFC Flash Flood Guidance for the requested hour and time.

    Before 2019 the guidance is pulled per-UGC from the postgis ``ffg``
    table and drawn as filled counties/zones; from 2019 onward a 5km
    gridded GRIB product is located on disk and drawn via pcolormesh.

    Args:
        fdict (dict): autoplot context parameters.

    Returns:
        tuple: (matplotlib Figure, pandas.DataFrame); the DataFrame is
        empty for the gridded (GRIB) branch.

    Raises:
        NoDataFound: when no recent GRIB file can be located.
    """
    ctx = get_autoplot_context(fdict, get_description())
    # timestamps are interpreted as UTC
    ts = ctx["ts"].replace(tzinfo=pytz.utc)
    hour = int(ctx["hour"])
    ilabel = ctx["ilabel"] == "yes"
    plot = MapPlot(
        sector=ctx["t"],
        continentalcolor="white",
        state=ctx["state"],
        cwa=ctx["wfo"],
        title=("NWS RFC %s Hour Flash Flood Guidance on %s UTC")
        % (hour, ts.strftime("%-d %b %Y %H")),
        subtitle=(
            "Estimated amount of %s Rainfall "
            "needed for non-urban Flash Flooding to commence"
        )
        % (HOURS[ctx["hour"]], ),
    )
    cmap = plt.get_cmap(ctx["cmap"])
    # rainfall thresholds (inches)
    bins = [
        0.01, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.25, 2.5, 2.75,
        3.0, 3.5, 4.0, 5.0,
    ]
    if ts.year < 2019:
        # legacy: per-UGC FFG stored in the database, one column per
        # guidance duration
        column = "hour%02i" % (hour, )
        pgconn = get_dbconn("postgis")
        # rank = 1 keeps the most recent guidance per UGC within 24 hours
        df = read_sql(
            """
        WITH data as (
            SELECT ugc, rank() OVER (PARTITION by ugc ORDER by valid DESC),
            hour01, hour03, hour06, hour12, hour24
            from ffg WHERE valid >= %s and valid <= %s)
        SELECT *, substr(ugc, 3, 1) as ztype from data where rank = 1
        """,
            pgconn,
            params=(ts - datetime.timedelta(hours=24), ts),
            index_col="ugc",
        )
        # counties ('C') first, then zones ('Z'); units only on the
        # second fill call
        df2 = df[df["ztype"] == "C"]
        plot.fill_ugcs(
            df2[column].to_dict(),
            bins,
            cmap=cmap,
            plotmissing=False,
            ilabel=ilabel,
        )
        df2 = df[df["ztype"] == "Z"]
        plot.fill_ugcs(
            df2[column].to_dict(),
            bins,
            cmap=cmap,
            plotmissing=False,
            units="inches",
            ilabel=ilabel,
        )
    else:
        # use grib data
        # snap back to the most recent 6-hourly cycle
        ts -= datetime.timedelta(hours=(ts.hour % 6))
        ts = ts.replace(minute=0)
        fn = None
        # search back in time for an available archive file
        # NOTE(review): the filename is pinned to the 00z cycle while the
        # loop steps 4 hours -- confirm intended
        for offset in range(0, 24, 4):
            ts2 = ts - datetime.timedelta(hours=offset)
            testfn = ts2.strftime(
                ("/mesonet/ARCHIVE/data/%Y/%m/%d/model/ffg/"
                 "5kmffg_%Y%m%d00.grib2"))
            if os.path.isfile(testfn):
                fn = testfn
                break
        if fn is None:
            raise NoDataFound("No valid grib data found!")
        # pick the grib message matching the requested guidance duration
        grbs = pygrib.index(fn, "stepRange")
        grb = grbs.select(stepRange="0-%s" % (hour, ))[0]
        lats, lons = grb.latlons()
        # GRIB values are mm; convert to inches
        data = (
            masked_array(grb.values, data_units=units("mm"))
            .to(units("inch")).m)
        plot.pcolormesh(lons, lats, data, bins, cmap=cmap)
        if ilabel:
            plot.drawcounties()
        df = pd.DataFrame()
    return plot.fig, df
def test_drawugcs():
    """Fill three Iowa counties on a CONUS map."""
    counties = {"IAC001": 10, "IAC003": 20, "IAC005": 30}
    plot = MapPlot(
        nocaption=True,
        sector='conus',
        title='Counties, 3 filled in Iowa',
    )
    plot.fill_ugcs(counties)
    return plot.fig
def plotter(fdict):
    """Map NWS watch/warning/advisory statistics by UGC.

    ``v`` selects the statistic: ``lastyear`` (year of last issuance),
    ``yearcount`` (count in one year), ``total`` (count over a date
    range), or ``yearavg`` (yearly average over a span of years).

    Args:
        fdict (dict): CGI-style parameter dictionary.

    Returns:
        tuple: (matplotlib Figure, pandas.DataFrame), or an apology
        string when the query found no rows.
    """
    import matplotlib
    matplotlib.use("agg")
    import matplotlib.pyplot as plt
    from pyiem.plot import MapPlot
    pgconn = psycopg2.connect(database="postgis", host="iemdb",
                              user="******")
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    state = fdict.get("state", "IA")
    phenomena = fdict.get("phenomena", "TO")
    significance = fdict.get("significance", "A")
    station = fdict.get("station", "DMX")[:4]
    sdate = datetime.datetime.strptime(fdict.get("sdate", "2015-01-01"),
                                       "%Y-%m-%d")
    edate = datetime.datetime.strptime(fdict.get("edate", "2015-01-01"),
                                       "%Y-%m-%d")
    t = fdict.get("t", "state")
    varname = fdict.get("v", "lastyear")
    year = int(fdict.get("year", 2015))
    year2 = int(fdict.get("year2", 2015))
    ilabel = fdict.get("ilabel", "no") == "yes"
    nt = NetworkTable("WFO")
    if varname == "lastyear":
        if t == "cwa":
            cursor.execute(
                """
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """,
                (station if len(station) == 3 else station[1:],
                 phenomena, significance),
            )
        else:
            cursor.execute(
                """
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """,
                (state, phenomena, significance),
            )
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(valid=row[1], year=row[1].year, ugc=row[0]))
            data[row[0]] = row[1].year
        title = "Year of Last"
        datavar = "year"
    elif varname == "yearcount":
        table = "warnings_%s" % (year,)
        if t == "cwa":
            cursor.execute(
                """
            select ugc, count(*) from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """,
                (station if len(station) == 3 else station[1:],
                 phenomena, significance),
            )
        else:
            cursor.execute(
                """
            select ugc, count(*) from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """,
                (state, phenomena, significance),
            )
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(count=row[1], year=year, ugc=row[0]))
            data[row[0]] = row[1]
        title = "Count for %s" % (year,)
        datavar = "count"
    elif varname == "total":
        table = "warnings"
        if t == "cwa":
            cursor.execute(
                """
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s GROUP by ugc
            """,
                (station if len(station) == 3 else station[1:],
                 phenomena, significance, sdate, edate),
            )
        else:
            # NOTE(review): issue < edate here vs <= in the cwa branch --
            # preserved as-is, confirm intent
            cursor.execute(
                """
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """,
                (state, phenomena, significance, sdate, edate),
            )
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(count=row[1], year=year, ugc=row[0],
                             minissue=row[2], maxissue=row[3]))
            data[row[0]] = row[1]
        title = "Total between %s and %s" % (sdate.strftime("%d %b %Y"),
                                             edate.strftime("%d %b %Y"))
        datavar = "count"
    elif varname == "yearavg":
        table = "warnings"
        if t == "cwa":
            cursor.execute(
                """
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s GROUP by ugc
            """,
                (
                    station if len(station) == 3 else station[1:],
                    phenomena,
                    significance,
                    datetime.date(year, 1, 1),
                    datetime.date(year2 + 1, 1, 1),
                ),
            )
        else:
            cursor.execute(
                """
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """,
                (state, phenomena, significance,
                 datetime.date(year, 1, 1),
                 datetime.date(year2 + 1, 1, 1)),
            )
        rows = []
        data = {}
        # track the actual period of record found in the result set
        minv = datetime.datetime(2050, 1, 1)
        maxv = datetime.datetime(1986, 1, 1)
        for row in cursor:
            if row[2] < minv:
                minv = row[2]
            if row[3] > maxv:
                maxv = row[3]
            rows.append(dict(count=row[1], year=year, ugc=row[0],
                             minissue=row[2], maxissue=row[3]))
            data[row[0]] = row[1]
        title = "Yearly Avg: %s and %s" % (minv.strftime("%d %b %Y"),
                                           maxv.strftime("%d %b %Y"))
        datavar = "average"
    if len(rows) == 0:
        return "Sorry, no data found for query!"
    df = pd.DataFrame(rows)
    if varname == "yearavg":
        years = maxv.year - minv.year + 1
        df["average"] = df["count"] / years
        for key in data:
            data[key] = round(data[key] / float(years), 2)
        # list() so bins can be appended below; under Python 3 a bare
        # range object has no append() and would raise TypeError
        bins = list(range(0, int(np.max(df[datavar][:])) + 2, 1))
    else:
        bins = list(range(np.min(df[datavar][:]),
                          np.max(df[datavar][:]) + 2, 1))
    if len(bins) < 3:
        bins.append(bins[-1] + 1)
    if len(bins) > 8:
        # too many discrete levels; use 8 evenly spaced integer bins
        bins = np.linspace(np.min(df[datavar][:]),
                           np.max(df[datavar][:]) + 2, 8, dtype="i")
    subtitle = "based on IEM Archives of NWS WWA"
    if t == "cwa":
        subtitle = "Plotted for %s (%s), %s" % (nt.sts[station]["name"],
                                                station, subtitle)
    else:
        subtitle = "Plotted for %s, %s" % (state_names[state], subtitle)
    m = MapPlot(
        sector=("state" if t == "state" else "cwa"),
        state=state,
        cwa=(station if len(station) == 3 else station[1:]),
        axisbg="white",
        title=("%s %s %s (%s.%s)")
        % (title, vtec._phenDict[phenomena], vtec._sigDict[significance],
           phenomena, significance),
        subtitle=subtitle,
        nocaption=True,
    )
    cmap = plt.get_cmap("Paired")
    cmap.set_over("white")
    cmap.set_under("white")
    m.fill_ugcs(data, bins, cmap=cmap, ilabel=ilabel)
    return m.fig, df
def plotter(fdict):
    """Map NWS watch/warning/advisory statistics by UGC.

    ``v`` selects the statistic: ``lastyear`` (year of last issuance),
    ``yearcount`` (count in one year), ``total`` (count over a date
    range), or ``yearavg`` (yearly average over a span of years).

    Args:
        fdict (dict): CGI-style parameter dictionary.

    Returns:
        tuple: (matplotlib Figure, pandas.DataFrame), or an apology
        string when the query found no rows.
    """
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from pyiem.plot import MapPlot
    pgconn = psycopg2.connect(database='postgis', host='iemdb',
                              user='******')
    cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    state = fdict.get('state', 'IA')
    phenomena = fdict.get('phenomena', 'TO')
    significance = fdict.get('significance', 'A')
    station = fdict.get('station', 'DMX')[:4]
    sdate = datetime.datetime.strptime(fdict.get('sdate', '2015-01-01'),
                                       '%Y-%m-%d')
    edate = datetime.datetime.strptime(fdict.get('edate', '2015-01-01'),
                                       '%Y-%m-%d')
    t = fdict.get('t', 'state')
    varname = fdict.get('v', 'lastyear')
    year = int(fdict.get('year', 2015))
    year2 = int(fdict.get('year2', 2015))
    ilabel = (fdict.get('ilabel', 'no') == 'yes')
    nt = NetworkTable("WFO")
    if varname == 'lastyear':
        if t == 'cwa':
            cursor.execute("""
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """, (station if len(station) == 3 else station[1:],
                  phenomena, significance))
        else:
            cursor.execute("""
            select ugc, max(issue at time zone 'UTC') from warnings
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """, (state, phenomena, significance))
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(valid=row[1], year=row[1].year, ugc=row[0]))
            data[row[0]] = row[1].year
        title = "Year of Last"
        datavar = "year"
    elif varname == 'yearcount':
        table = "warnings_%s" % (year, )
        if t == 'cwa':
            cursor.execute("""
            select ugc, count(*) from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            GROUP by ugc
            """, (station if len(station) == 3 else station[1:],
                  phenomena, significance))
        else:
            cursor.execute("""
            select ugc, count(*) from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s GROUP by ugc
            """, (state, phenomena, significance))
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(count=row[1], year=year, ugc=row[0]))
            data[row[0]] = row[1]
        title = "Count for %s" % (year,)
        datavar = "count"
    elif varname == 'total':
        table = "warnings"
        if t == 'cwa':
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s GROUP by ugc
            """, (station if len(station) == 3 else station[1:],
                  phenomena, significance, sdate, edate))
        else:
            # NOTE(review): issue < edate here vs <= in the cwa branch --
            # preserved as-is, confirm intent
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """, (state, phenomena, significance, sdate, edate))
        rows = []
        data = {}
        for row in cursor:
            rows.append(dict(count=row[1], year=year, ugc=row[0],
                             minissue=row[2], maxissue=row[3]))
            data[row[0]] = row[1]
        title = "Total between %s and %s" % (sdate.strftime("%d %b %Y"),
                                             edate.strftime("%d %b %Y"))
        datavar = "count"
    elif varname == 'yearavg':
        table = "warnings"
        if t == 'cwa':
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE wfo = %s and phenomena = %s and significance = %s
            and issue >= %s and issue <= %s GROUP by ugc
            """, (station if len(station) == 3 else station[1:],
                  phenomena, significance,
                  datetime.date(year, 1, 1),
                  datetime.date(year2 + 1, 1, 1)))
        else:
            cursor.execute("""
            select ugc, count(*), min(issue at time zone 'UTC'),
            max(issue at time zone 'UTC') from """ + table + """
            WHERE substr(ugc, 1, 2) = %s and phenomena = %s
            and significance = %s and issue >= %s and issue < %s
            GROUP by ugc
            """, (state, phenomena, significance,
                  datetime.date(year, 1, 1),
                  datetime.date(year2 + 1, 1, 1)))
        rows = []
        data = {}
        # track the actual period of record found in the result set
        minv = datetime.datetime(2050, 1, 1)
        maxv = datetime.datetime(1986, 1, 1)
        for row in cursor:
            if row[2] < minv:
                minv = row[2]
            if row[3] > maxv:
                maxv = row[3]
            rows.append(dict(count=row[1], year=year, ugc=row[0],
                             minissue=row[2], maxissue=row[3]))
            data[row[0]] = row[1]
        title = ("Yearly Avg: %s and %s"
                 ) % (minv.strftime("%d %b %Y"), maxv.strftime("%d %b %Y"))
        datavar = "average"
    if len(rows) == 0:
        return("Sorry, no data found for query!")
    df = pd.DataFrame(rows)
    if varname == 'yearavg':
        years = maxv.year - minv.year + 1
        df['average'] = df['count'] / years
        for key in data:
            data[key] = round(data[key] / float(years), 2)
        # list() so bins can be appended below; under Python 3 a bare
        # range object has no append() and would raise TypeError
        bins = list(range(0, int(np.max(df[datavar][:])) + 2, 1))
    else:
        bins = list(range(np.min(df[datavar][:]),
                          np.max(df[datavar][:]) + 2, 1))
    if len(bins) < 3:
        bins.append(bins[-1] + 1)
    if len(bins) > 8:
        # too many discrete levels; use 8 evenly spaced integer bins
        bins = np.linspace(np.min(df[datavar][:]),
                           np.max(df[datavar][:]) + 2, 8, dtype='i')
    subtitle = "based on IEM Archives of NWS WWA"
    if t == 'cwa':
        subtitle = "Plotted for %s (%s), %s" % (nt.sts[station]['name'],
                                                station, subtitle)
    else:
        subtitle = "Plotted for %s, %s" % (state_names[state], subtitle)
    m = MapPlot(sector=('state' if t == 'state' else 'cwa'),
                state=state,
                cwa=(station if len(station) == 3 else station[1:]),
                axisbg='white',
                title=('%s %s %s (%s.%s)'
                       ) % (title, vtec._phenDict[phenomena],
                            vtec._sigDict[significance],
                            phenomena, significance),
                subtitle=subtitle, nocaption=True, titlefontsize=16)
    cmap = plt.get_cmap('Paired')
    cmap.set_over('white')
    cmap.set_under('white')
    m.fill_ugcs(data, bins, cmap=cmap, ilabel=ilabel)
    return m.fig, df