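# A sketch of the module-level imports these functions rely on (an assumption,
# since the import header is not shown here). The helpers parse_data,
# tta_analysis, pandas2stack and add_colorbar are project-local modules and
# are imported separately in this repo.
import numpy as np
import numpy.ma as ma
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gsp
import matplotlib.dates as mdates
import matplotlib.cm as cm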
def plot_with_lines(year=None, target=None):

    ''' time-height panel of BBY wind-profiler data (left) with every
        hourly profile overlaid as a faint line (right); color limits
        and title assume target='wdir' '''

    fig = plt.figure(figsize=(10, 5))

    gs = gsp.GridSpec(1, 2, width_ratios=[2, 1])

    ax1 = plt.subplot(gs[0])
    ax2 = plt.subplot(gs[1])

    wprof = parse_data.windprof(year)

    wp = np.squeeze(pandas2stack(wprof.dframe[target]))
    wp_ma = ma.masked_where(np.isnan(wp), wp)

    X, Y = wprof.time, wprof.hgt

    ax1.pcolormesh(X, Y, wp_ma, vmin=0, vmax=360)
    ax1.xaxis.set_major_locator(mdates.MonthLocator())
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%Y'))
    ax1.set_xlabel(r'$ Time \rightarrow$')
    ax1.set_ylabel('height gate')

    # each column of wp is one hourly profile
    for prof in range(wp.shape[1]):
        x = wp[:, prof]
        y = range(wp.shape[0])
        ax2.plot(x, y, color='r', alpha=0.05)
        # ax2.scatter(x, y, color='r', alpha=0.05)

    ax2.set_yticklabels('')
    ax2.set_xlabel(target)

    ax1.set_title('BBY Windprof wdir')

    plt.tight_layout()
    plt.show(block=False)
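# A minimal usage sketch for plot_with_lines; the season year below is
# illustrative and assumes the matching BBY wind-profiler files exist.
# plot_with_lines(year=2001, target='wdir')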
def process(year=[], wdsurf=None, wdwpro=None, rainbb=None,
            raincz=None, nhours=None):

    binss = {'wdir': np.arange(0, 370, 10),
             'wspd': np.arange(0, 36, 1)}

    target = ['wdir', 'wspd']
    arrays = {}
    for t in target:

        first = True
        for y in year:
            print('Processing year {}'.format(y))

            ' tta analysis '
            tta = tta_analysis(y)
            tta.start_df(wdir_surf=wdsurf,
                         wdir_wprof=wdwpro,
                         rain_bby=rainbb,
                         rain_czd=raincz,
                         nhours=nhours)

            ' retrieve dates '
            include_dates = tta.include_dates
            tta_dates = tta.tta_dates
            notta_dates = tta.notta_dates

            ' read wprof '
            wprof_df = parse_data.windprof(y)
            wprof = wprof_df.dframe[t]

            ' wprof partition '
            wprof = wprof.loc[include_dates]      # all included
            wprof_tta = wprof.loc[tta_dates]      # only tta
            wprof_notta = wprof.loc[notta_dates]  # only notta

            s1 = np.squeeze(pandas2stack(wprof))
            s2 = np.squeeze(pandas2stack(wprof_tta))
            s3 = np.squeeze(pandas2stack(wprof_notta))

            if first:
                wp = s1
                wp_tta = s2
                wp_notta = s3
                first = False
            else:
                wp = np.hstack((wp, s1))
                wp_tta = np.hstack((wp_tta, s2))
                wp_notta = np.hstack((wp_notta, s3))

        ' total hours in each partition '
        _, wp_hours = wp.shape
        _, tta_hours = wp_tta.shape
        _, notta_hours = wp_notta.shape

        arrays[t] = [wp, wp_tta, wp_notta]

    ' makes CFAD '
    hist_array_spd = np.empty((40, len(binss['wspd']) - 1, 3))
    hist_array_dir = np.empty((40, len(binss['wdir']) - 1, 3))
    cfad_array_spd = np.empty((40, len(binss['wspd']) - 1, 3))
    cfad_array_dir = np.empty((40, len(binss['wdir']) - 1, 3))
    average_spd = np.empty((40, 3))
    average_dir = np.empty((40, 3))
    median_spd = np.empty((40, 3))
    median_dir = np.empty((40, 3))

    for k, v in arrays.items():

        hist_array = np.empty((40, len(binss[k]) - 1, 3))
        cfad_array = np.empty((40, len(binss[k]) - 1, 3))
        average = np.empty((40, 3))
        median = np.empty((40, 3))

        wp = v[0]
        wp_tta = v[1]
        wp_notta = v[2]

        for hgt in range(wp.shape[0]):

            row1 = wp[hgt, :]
            row2 = wp_tta[hgt, :]
            row3 = wp_notta[hgt, :]

            for n, r in enumerate([row1, row2, row3]):

                ' following CFAD Yuter et al (1995) '
                freq, bins = np.histogram(r[~np.isnan(r)],
                                          bins=binss[k])
                hist_array[hgt, :, n] = freq
                cfad_array[hgt, :, n] = 100. * (freq / float(freq.sum()))

                bin_middle = (bins[1:] + bins[:-1]) / 2.
                average[hgt, n] = np.sum(freq * bin_middle) / freq.sum()
                median[hgt, n] = np.percentile(r[~np.isnan(r)], 50)

        if k == 'wspd':
            hist_array_spd = hist_array
            cfad_array_spd = cfad_array
            average_spd = average
            median_spd = median
        else:
            hist_array_dir = hist_array
            cfad_array_dir = cfad_array
            average_dir = average
            median_dir = median

    return [hist_array_spd, hist_array_dir,
            cfad_array_spd, cfad_array_dir,
            binss['wspd'], binss['wdir'],
            wprof_df.hgt,
            wp_hours, tta_hours, notta_hours,
            average_spd, average_dir,
            median_spd, median_dir]
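# Usage sketch for process(); the keyword values below are illustrative
# placeholders, not the thresholds used in the TTA analysis.
# out = process(year=[1998] + list(range(2001, 2013)),
#               wdsurf=125, wdwpro=170, rainbb=0.25, raincz=0.25, nhours=2)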
def processv2(year=[], wdsurf=None, wdwpro=None, rainbb=None,
              raincz=None, nhours=None):

    ''' v2: target loop moved into year loop '''

    binss = {'wdir': np.arange(0, 370, 10),
             'wspd': np.arange(0, 36, 1),
             'u': np.arange(-15, 21, 1),
             'v': np.arange(-14, 21, 1),
             }

    target = ['wdir', 'wspd']
    arrays = {}

    wsp = np.empty((40, 1))
    wsp_tta = np.empty((40, 1))
    wsp_notta = np.empty((40, 1))

    wdr = np.empty((40, 1))
    wdr_tta = np.empty((40, 1))
    wdr_notta = np.empty((40, 1))

    for y in year:
        print('Processing year {}'.format(y))

        ' tta analysis '
        tta = tta_analysis(y)
        tta.start_df(wdir_surf=wdsurf,
                     wdir_wprof=wdwpro,
                     rain_bby=rainbb,
                     rain_czd=raincz,
                     nhours=nhours)

        ' retrieve dates '
        include_dates = tta.include_dates
        tta_dates = tta.tta_dates
        notta_dates = tta.notta_dates

        ' read wprof '
        wprof_df = parse_data.windprof(y)

        for n, t in enumerate(target):

            wprof = wprof_df.dframe[t]

            ' wprof partition '
            wprof = wprof.loc[include_dates]      # all included
            wprof_tta = wprof.loc[tta_dates]      # only tta
            wprof_notta = wprof.loc[notta_dates]  # only notta

            s1 = np.squeeze(pandas2stack(wprof))
            if wprof_tta.size > 0:
                s2 = np.squeeze(pandas2stack(wprof_tta))
                ttaok = True
            else:
                ttaok = False
            s3 = np.squeeze(pandas2stack(wprof_notta))

            if t == 'wdir':
                wdr = np.hstack((wdr, s1))
                if ttaok is True:
                    if s2.ndim == 1:
                        s2 = np.expand_dims(s2, axis=1)
                    wdr_tta = np.hstack((wdr_tta, s2))
                wdr_notta = np.hstack((wdr_notta, s3))
            else:
                wsp = np.hstack((wsp, s1))
                if ttaok is True:
                    if s2.ndim == 1:
                        s2 = np.expand_dims(s2, axis=1)
                    wsp_tta = np.hstack((wsp_tta, s2))
                wsp_notta = np.hstack((wsp_notta, s3))

    arrays['wdir'] = [wdr, wdr_tta, wdr_notta]
    arrays['wspd'] = [wsp, wsp_tta, wsp_notta]

    ' zonal and meridional components from speed and direction '
    uw = -wsp * np.sin(np.radians(wdr))
    uw_tta = -wsp_tta * np.sin(np.radians(wdr_tta))
    uw_notta = -wsp_notta * np.sin(np.radians(wdr_notta))

    vw = -wsp * np.cos(np.radians(wdr))
    vw_tta = -wsp_tta * np.cos(np.radians(wdr_tta))
    vw_notta = -wsp_notta * np.cos(np.radians(wdr_notta))

    arrays['u'] = [uw, uw_tta, uw_notta]
    arrays['v'] = [vw, vw_tta, vw_notta]

    ''' total hours; the first column of each array comes from the
        np.empty initialization, so it is not counted '''
    _, wp_hours = wsp.shape
    _, tta_hours = wsp_tta.shape
    _, notta_hours = wsp_notta.shape
    wp_hours -= 1
    tta_hours -= 1
    notta_hours -= 1

    ' initialize arrays '
    hist_array_spd = np.empty((40, len(binss['wspd']) - 1, 3))
    hist_array_dir = np.empty((40, len(binss['wdir']) - 1, 3))
    cfad_array_spd = np.empty((40, len(binss['wspd']) - 1, 3))
    cfad_array_dir = np.empty((40, len(binss['wdir']) - 1, 3))
    average_spd = np.empty((40, 3))
    average_dir = np.empty((40, 3))
    median_spd = np.empty((40, 3))
    median_dir = np.empty((40, 3))

    ' loop for variable (wdir, wspd, u, v) '
    for k, v in arrays.items():

        hist_array = np.empty((40, len(binss[k]) - 1, 3))
        cfad_array = np.empty((40, len(binss[k]) - 1, 3))
        average = np.empty((40, 3))
        median = np.empty((40, 3))

        ' extract value '
        wp = v[0]
        wp_tta = v[1]
        wp_notta = v[2]

        ' makes CFAD '
        for hgt in range(wp.shape[0]):

            row1 = wp[hgt, :]
            row2 = wp_tta[hgt, :]
            row3 = wp_notta[hgt, :]

            for n, r in enumerate([row1, row2, row3]):

                ' following CFAD Yuter et al (1995) '
                freq, bins = np.histogram(r[~np.isnan(r)],
                                          bins=binss[k])
                hist_array[hgt, :, n] = freq
                cfad_array[hgt, :, n] = 100. * (freq / float(freq.sum()))

                bin_middle = (bins[1:] + bins[:-1]) / 2.
                average[hgt, n] = np.sum(freq * bin_middle) / freq.sum()
                median[hgt, n] = np.percentile(r[~np.isnan(r)], 50)

        if k == 'wspd':
            hist_array_spd = hist_array
            cfad_array_spd = cfad_array
            average_spd = average
            median_spd = median
        elif k == 'wdir':
            hist_array_dir = hist_array
            cfad_array_dir = cfad_array
            average_dir = average
            median_dir = median
        elif k == 'u':
            hist_array_u = hist_array
            cfad_array_u = cfad_array
            average_u = average
            median_u = median
        elif k == 'v':
            hist_array_v = hist_array
            cfad_array_v = cfad_array
            average_v = average
            median_v = median

    return [hist_array_spd, hist_array_dir,
            hist_array_u, hist_array_v,
            cfad_array_spd, cfad_array_dir,
            cfad_array_u, cfad_array_v,
            binss['wspd'], binss['wdir'],
            binss['u'], binss['v'],
            wprof_df.hgt,
            wp_hours, tta_hours, notta_hours,
            average_spd, average_dir,
            average_u, average_v,
            median_spd, median_dir,
            median_u, median_v,
            ]
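# The u/v decomposition in processv2 follows the meteorological convention
# (wind direction is the direction the wind blows FROM), i.e.
# u = -wspd*sin(wdir) and v = -wspd*cos(wdir). The helper below is not part
# of the original module; it is a small self-contained check of that
# convention.
def _wind_components(wspd, wdir):
    ' wspd in m/s, wdir in degrees; returns zonal u and meridional v '
    wdir_rad = np.radians(wdir)
    u = -wspd * np.sin(wdir_rad)   # positive toward east
    v = -wspd * np.cos(wdir_rad)   # positive toward north
    return u, v

# e.g. a 10 m/s southerly (wdir=180) gives u ~ 0 and v ~ +10
# u, v = _wind_components(10., 180.)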
# Script fragment: pair surface (BBY) winds with the first 15 wind-profiler
# gates for rainy CZD hours. The objects wpr, czd, bby, wspd, first and last
# are assumed to be defined upstream (wspd presumably parallels the wdir
# line below).
wdir = wpr.dframe.loc[first:last].wdir
czd = czd.dframe.loc[first:last]
bby = bby.dframe.loc[first:last]

''' select rainy days '''
rain_czd = czd.precip > 0
rain_dates = rain_czd.loc[rain_czd.values].index
# rain_dates = None

if rain_dates is None:
    wd = pd.DataFrame(index=wspd.index, columns=range(16))
    ws = pd.DataFrame(index=wspd.index, columns=range(16))

    # column 0 is the surface (BBY) observation
    wd.iloc[:, 0] = bby.wdir
    ws.iloc[:, 0] = bby.wspd

    wdir = pandas2stack(wdir).T
    wspd = pandas2stack(wspd).T

    # columns 1-15 are the first 15 wind-profiler gates
    wd.iloc[:, 1:] = np.squeeze(wdir[:, :15])
    ws.iloc[:, 1:] = np.squeeze(wspd[:, :15])
else:
    wd = pd.DataFrame(index=wspd.loc[rain_dates].index,
                      columns=range(16))
    ws = pd.DataFrame(index=wspd.loc[rain_dates].index,
                      columns=range(16))

    wd.iloc[:, 0] = bby.wdir.loc[rain_dates]
    ws.iloc[:, 0] = bby.wspd.loc[rain_dates]

    wdir = pandas2stack(wdir.loc[rain_dates]).T
def plot_with_hist(year=None, target=None, normalized=True,
                   pngsuffix=None):

    name = {'wdir': 'Wind Direction',
            'wspd': 'Wind Speed'}

    if target == 'wdir':
        vmin, vmax = [0, 360]
        bins = np.arange(0, 370, 10)
        hist_xticks = np.arange(0, 400, 40)
        hist_xlim = [0, 360]
    elif target == 'wspd':
        vmin, vmax = [0, 30]
        bins = np.arange(0, 36, 1)
        hist_xticks = np.arange(0, 40, 5)
        hist_xlim = [0, 35]

    fig = plt.figure(figsize=(20, 5))

    gs = gsp.GridSpec(1, 2, width_ratios=[3, 1])

    ax1 = plt.subplot(gs[0])
    ax2 = plt.subplot(gs[1])

    wprof = parse_data.windprof(year)

    wp = np.squeeze(pandas2stack(wprof.dframe[target]))
    wp_ma = ma.masked_where(np.isnan(wp), wp)

    X, Y = wprof.time, wprof.hgt

    p = ax1.pcolormesh(X, Y, wp_ma, vmin=vmin, vmax=vmax)
    add_colorbar(ax1, p)
    ax1.xaxis.set_major_locator(mdates.MonthLocator())
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%Y'))
    ax1.set_xlabel(r'$ Time \rightarrow$')
    ax1.set_ylabel('Altitude [m] MSL')
    ax1.set_title('BBY Windprof ' + name[target])

    array = np.empty((40, len(bins) - 1))
    for hgt in range(wp.shape[0]):
        row = wp[hgt, :]
        freq, bins = np.histogram(row[~np.isnan(row)],
                                  bins=bins,
                                  density=normalized)
        array[hgt, :] = freq

    x = bins
    y = wprof.hgt
    p = ax2.pcolormesh(x, y, array, cmap='viridis')
    amin = np.amin(array)
    amax = np.amax(array)
    cbar = add_colorbar(ax2, p, size='4%', ticks=[amin, amax])
    cbar.ax.set_yticklabels(['low', 'high'])
    ax2.set_xticks(hist_xticks)
    ax2.set_yticklabels('')
    ax2.set_xlabel(name[target])
    ax2.set_xlim(hist_xlim)
    ax2.set_title('Normalized frequency')

    plt.tight_layout()

    if pngsuffix:
        out_name = 'wprof_{}_{}.png'
        plt.savefig(out_name.format(target, pngsuffix))
    else:
        plt.show(block=False)
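# Usage sketch; the year is illustrative. With pngsuffix set, the figure is
# saved as wprof_<target>_<pngsuffix>.png instead of being shown.
# plot_with_hist(year=2004, target='wspd', normalized=True, pngsuffix='2004')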
def plot(year=[], target=None, pngsuffix=False, normalized=True,
         contourf=True, pdfsuffix=False,
         wdsurf=None, wdwpro=None, rainbb=None, raincz=None,
         nhours=None):

    name = {'wdir': 'Wind Direction',
            'wspd': 'Wind Speed'}

    if target == 'wdir':
        bins = np.arange(0, 370, 10)
        hist_xticks = np.arange(0, 420, 60)
        hist_xlim = [0, 360]
    elif target == 'wspd':
        bins = np.arange(0, 36, 1)
        hist_xticks = np.arange(0, 40, 5)
        hist_xlim = [0, 35]

    first = True
    for y in year:
        print('Processing year {}'.format(y))

        ' tta analysis '
        tta = tta_analysis(y)
        tta.start_df(wdir_surf=wdsurf,
                     wdir_wprof=wdwpro,
                     rain_bby=rainbb,
                     rain_czd=raincz,
                     nhours=nhours)

        ' retrieve dates '
        include_dates = tta.include_dates
        tta_dates = tta.tta_dates
        notta_dates = tta.notta_dates

        ' read wprof '
        wprof_df = parse_data.windprof(y)
        wprof = wprof_df.dframe[target]

        ' wprof partition '
        wprof = wprof.loc[include_dates]      # all included
        wprof_tta = wprof.loc[tta_dates]      # only tta
        wprof_notta = wprof.loc[notta_dates]  # only notta

        s1 = np.squeeze(pandas2stack(wprof))
        s2 = np.squeeze(pandas2stack(wprof_tta))
        s3 = np.squeeze(pandas2stack(wprof_notta))

        if first:
            wp = s1
            wp_tta = s2
            wp_notta = s3
            first = False
        else:
            wp = np.hstack((wp, s1))
            wp_tta = np.hstack((wp_tta, s2))
            wp_notta = np.hstack((wp_notta, s3))

    _, wp_hours = wp.shape
    _, tta_hours = wp_tta.shape
    _, notta_hours = wp_notta.shape

    ' makes CFAD '
    hist_array = np.empty((40, len(bins) - 1, 3))
    for hgt in range(wp.shape[0]):

        row1 = wp[hgt, :]
        row2 = wp_tta[hgt, :]
        row3 = wp_notta[hgt, :]

        for n, r in enumerate([row1, row2, row3]):

            ' following CFAD Yuter et al (1995) '
            freq, bins = np.histogram(r[~np.isnan(r)],
                                      bins=bins)
            if normalized:
                hist_array[hgt, :, n] = 100. * (freq / float(freq.sum()))
            else:
                hist_array[hgt, :, n] = freq

    fig, axs = plt.subplots(1, 3, sharey=True, figsize=(10, 8))

    ax1 = axs[0]
    ax2 = axs[1]
    ax3 = axs[2]

    hist_wp = np.squeeze(hist_array[:, :, 0])
    hist_wptta = np.squeeze(hist_array[:, :, 1])
    hist_wpnotta = np.squeeze(hist_array[:, :, 2])

    x = bins
    y = wprof_df.hgt

    if contourf:
        X, Y = np.meshgrid(x, y)
        nancol = np.zeros((40, 1)) + np.nan
        hist_wp = np.hstack((hist_wp, nancol))
        hist_wptta = np.hstack((hist_wptta, nancol))
        hist_wpnotta = np.hstack((hist_wpnotta, nancol))

        vmax = 20
        nlevels = 10
        delta = int(vmax / nlevels)
        v = np.arange(2, vmax + delta, delta)

        cmap = cm.get_cmap('plasma')

        ax1.contourf(X, Y, hist_wp, v, cmap=cmap)
        p = ax2.contourf(X, Y, hist_wptta, v, cmap=cmap, extend='max')
        p.cmap.set_over(cmap(1.0))
        ax3.contourf(X, Y, hist_wpnotta, v, cmap=cmap)
        cbar = add_colorbar(ax3, p, size='4%')
    else:
        p = ax1.pcolormesh(x, y, hist_wp, cmap='viridis')
        ax2.pcolormesh(x, y, hist_wptta, cmap='viridis')
        ax3.pcolormesh(x, y, hist_wpnotta, cmap='viridis')
        amin = np.amin(hist_wpnotta)
        amax = np.amax(hist_wpnotta)
        cbar = add_colorbar(ax3, p, size='4%', ticks=[amin, amax])
        cbar.ax.set_yticklabels(['low', 'high'])

    ' --- setup ax1 --- '
    amin = np.amin(hist_wp)
    amax = np.amax(hist_wp)
    ax1.set_xticks(hist_xticks)
    ax1.set_xlim(hist_xlim)
    ax1.set_ylim([0, 4000])
    txt = 'All profiles (n={})'.format(wp_hours)
    ax1.text(0.5, 0.95, txt, fontsize=15,
             transform=ax1.transAxes, va='bottom', ha='center')
    ax1.set_ylabel('Altitude [m] MSL')

    ' --- setup ax2 --- '
    amin = np.amin(hist_wptta)
    amax = np.amax(hist_wptta)
    ax2.set_xticks(hist_xticks)
    ax2.set_xlim(hist_xlim)
    ax2.set_ylim([0, 4000])
    ax2.set_xlabel(name[target])
    txt = 'TTA (n={})'.format(tta_hours)
    ax2.text(0.5, 0.95, txt, fontsize=15,
             transform=ax2.transAxes, va='bottom', ha='center')

    ' --- setup ax3 --- '
    ax3.set_xticks(hist_xticks)
    ax3.set_xlim(hist_xlim)
    ax3.set_ylim([0, 4000])
    txt = 'NO-TTA (n={})'.format(notta_hours)
    ax3.text(0.5, 0.95, txt, fontsize=15,
             transform=ax3.transAxes, va='bottom', ha='center')

    title = 'Normalized frequencies of BBY wind profiles {} \n'
    title += 'TTA wdir_surf:{}, wdir_wp:{}, '
    title += 'rain_bby:{}, rain_czd:{}, nhours:{}'
    if len(year) == 1:
        yy = 'year {}'.format(year[0])
    else:
        yy = 'year {} to {}'.format(year[0], year[-1])
    plt.suptitle(title.format(yy, wdsurf, wdwpro, rainbb, raincz, nhours),
                 fontsize=15)
    plt.subplots_adjust(top=0.9, left=0.1, right=0.95, bottom=0.1,
                        wspace=0.1)

    if pngsuffix:
        out_name = 'wprof_{}_cfad{}.png'
        plt.savefig(out_name.format(target, pngsuffix))
        plt.close()
    elif pdfsuffix:
        out_name = 'wprof_{}_cfad{}.pdf'
        plt.savefig(out_name.format(target, pdfsuffix))
        plt.close()
    else:
        plt.show()
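# Usage sketch for plot(); the TTA keyword values are illustrative
# placeholders, not the thresholds used in the analysis.
# plot(year=[1998] + list(range(2001, 2013)), target='wdir',
#      contourf=True, normalized=True,
#      wdsurf=125, wdwpro=170, rainbb=0.25, raincz=0.25, nhours=2)

# The script fragment below indexes a pre-existing figure and a flat array of
# 13 axes (n = 0..12, one panel per season plus a blank one). The layout is
# not shown here; an assumed minimal setup could be:
# fig, axes = plt.subplots(13, 1, figsize=(8, 18))
# ax = axes.flatten()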
target = 'wdir'

for n, y in enumerate([0, 1998] + list(range(2001, 2013))):

    if n == 0:
        # dummy entry: leave the first panel blank
        ax[n].axis('off')
    else:
        print(y)
        if n != 1:
            ax[n].set_yticklabels('')
        if n == 12:
            ax[n].set_xlabel(r'$ Time \rightarrow$')

        # parse windprof dataframe with wspd and wdir
        wprof = parse_data.windprof(y)
        wp = np.squeeze(pandas2stack(wprof.dframe[target]))

        # plot array
        # ax[n].imshow(wp, aspect='auto', origin='lower',
        #              interpolation='none')
        X, Y = wprof.time, wprof.hgt
        wp_ma = ma.masked_where(np.isnan(wp), wp)
        ax[n].pcolormesh(X, Y, wp_ma)
        ax[n].xaxis.set_major_locator(mdates.MonthLocator())
        ax[n].xaxis.set_major_formatter(mdates.DateFormatter('%b'))

        txt = 'Season: {}/{}'
        ax[n].text(0.05, 0.8, txt.format(str(y - 1), str(y)),
                   weight='bold',
                   transform=ax[n].transAxes)

fig.suptitle('BBY Windprof ' + target)