def read_histogram(filenamehist, filenamemax, filenamemin, nx, ny, nbins,
                   ctl_dict, dtypein):
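    """Read an ensemble histogram file and return it as a dictionary.

    The direct-access binary file filenamehist is assumed to contain, for each
    variable in ctl_dict['var_list'] and each of its vertical levels, nbins
    consecutive 2-D records holding the bin counts.  filenamemax and
    filenamemin hold the ensemble maximum and minimum fields that define the
    bin limits.  The result maps each variable name to a dictionary with the
    keys 'hist' (an nx x ny x nlevs x nbins array), 'minval' and 'maxval'.
    """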

    my_hist = dict()  #Initialize my_hist dictionary

    #max_val and min_val are dictionaries with one array per variable.
    max_val = ctlr.read_data_grads(filenamemax, ctl_dict, masked=False)
    min_val = ctlr.read_data_grads(filenamemin, ctl_dict, masked=False)

    nz = (np.max(ctl_dict['end_record']) +
          1) * nbins  #Total number of layers times the number of bins

    undef = np.float32(ctl_dict['undef'])
    #The total number of records is the total number of layers in the ctl
    #times the number of bins in the histogram.
    nrecords = (np.max(ctl_dict['end_record']) + 1) * nbins

    tmp_data = bio.read_data_direct(filenamehist,
                                    nx,
                                    ny,
                                    nz,
                                    dtypein=dtypein,
                                    undef_in=undef,
                                    undef_out=undef,
                                    seq_acces=False)  #Read the data.

    print(np.max(tmp_data), np.min(tmp_data))
    #Loop over variables to create the dictionary.
    ini_record = ctl_dict['ini_record']

    for ivar, my_var in enumerate(ctl_dict['var_list']):

        nlevs = int(ctl_dict['var_size'][ivar])
        if nlevs == 0:
            nlevs = 1

        my_hist[my_var] = dict()

        my_hist[my_var]['hist'] = np.ones((nx, ny, nlevs, nbins)) * undef

        my_hist[my_var]['minval'] = min_val[my_var]

        my_hist[my_var]['maxval'] = max_val[my_var]
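
        #Bins for level iz of this variable occupy records
        #(ini_record[ivar] + iz) * nbins : (ini_record[ivar] + iz + 1) * nbins.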

        for iz in range(nlevs):

            my_hist[my_var]['hist'][:, :, iz, :] = \
                tmp_data[:, :, (ini_record[ivar] + iz) * nbins:
                               (ini_record[ivar] + iz + 1) * nbins]


    return my_hist
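
#A minimal usage sketch (hypothetical file names; nx, ny, nbins and ctl_dict
#must describe the files actually written by the ensemble scripts):
#
#   hist = read_histogram('histogram.grd', 'ensmax.grd', 'ensmin.grd',
#                         nx, ny, nbins, ctl_dict, dtypein='i2')
#   hist['w']['hist'][ix, iy, iz, :]  #Bin counts for variable 'w' at one point.
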
#=========================================================
#  PLOT CROSS-SECTIONS OF REFLECTIVITY AND W.
#=========================================================

for iexp, my_exp in enumerate(expnames):

    mask = cmf.box_mask(cen_lon[0] - buffer_size, cen_lon[0] + buffer_size,
                        ini_lat[0], end_lat[0], lon, lat)

    #=========================================================
    #  READ
    #=========================================================

    my_file = basedir + expnames[iexp] + ctimes[0].strftime(
        "%Y%m%d%H%M%S") + '/' + filetype[0] + '/moment0001.grd'
    m01 = ctlr.read_data_grads(my_file, ctl_dict, masked=False, undef2nan=True)
    w = np.squeeze(np.delete(m01['w'], 4, 2))
    dbz3d = np.squeeze(np.delete(m01['dbz'], 4, 2))

    for ii in range(np.shape(w)[2]):
        w[:, :, ii][np.logical_not(mask)] = np.nan
        dbz3d[:, :, ii][np.logical_not(mask)] = np.nan
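    #Collapse one horizontal direction with a maximum to obtain a vertical
    #cross-section composite of W and reflectivity.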
    w = np.squeeze(np.nanmax(w, 1))
    dbz = np.squeeze(np.nanmax(dbz3d, 1))

    if iexp == 0:
        ax = plt.axes([0.035, 0.12, 0.20, 0.84])
        my_title = '(a) 5MIN'
        #For the 5MIN experiment find the location with the maximum bimodality in W
        my_file = basedir + expnames[iexp] + ctimes[0].strftime(
            "%Y%m%d%H%M%S") + '/' + filetype[0] + '/bimodality_index.grd'
     latlon_file = basedir + '/' + my_exp + '/latlon/latlon.grd'

     tmp=ctlr.read_data_records(latlon_file,ctl=ctl_dict,records=np.array([0,1]))
     lat=tmp[:,:,1]
     lon=tmp[:,:,0]

     #Exclude areas outside the radar domain.
     radar_mask = cmf.distance_range_mask( lon_radar , lat_radar , radar_range , lon , lat )

  #=========================================================
  #  READ THE DATA
  #=========================================================

  my_file=basedir + '/' + my_exp + '/time_mean/'+ filetype + '/' + filename

  parameter[my_exp]=ctlr.read_data_grads(my_file,ctl_dict,masked=False)

  print('KLD values')
  print( my_exp )
  for var in parameter[my_exp] :
     print( var )
     print( np.min( parameter[my_exp][var] ) , np.max( parameter[my_exp][var] ) )

  my_file=basedir + '/' + my_exp + '/time_mean/'+ filetype + '/moment0001_mean.grd'

  ensemble_mean[my_exp]=ctlr.read_data_grads(my_file,ctl_dict,masked=False)
  
  print('State variable values')
  print( my_exp )
  for var in ensemble_mean[my_exp] :
     print( var )
#=========================================================
#  EXAMPLE 4
#=========================================================
                                             undef=undef)

    parameter_5MIN[iv][parameter_5MIN[iv] == undef] = np.nan
    parameter_30SEC[iv][parameter_30SEC[iv] == undef] = np.nan

    print(my_var)
    print(np.nanmin(parameter_5MIN[iv]), np.nanmax(parameter_5MIN[iv]))
    print(np.nanmin(parameter_30SEC[iv]), np.nanmax(parameter_30SEC[iv]))

my_file = basedir + '/LE_D1_1km_30sec/time_mean/guesgp/moment0001_mean.grd'
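
#Column-maximum reflectivity from the time-mean first moment: level index 4 is
#removed (as elsewhere in these scripts) before taking the vertical maximum.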

max_dbz = np.squeeze(
    np.nanmax(
        np.delete(
            ctlr.read_data_grads(my_file,
                                 ctl_dict_2,
                                 masked=False,
                                 undef2nan=True)['dbz'], 4, 2), 2))
max_dbz[np.logical_not(radar_mask)] = np.nan

print('Finish reading data')

#=========================================================================================
#Plot the mean KLD and its standard deviation.
#=========================================================================================

#Start subplots
ncols = 3
nrows = 2
icoldelta = 1.0 / ncols
irowdelta = 1.0 / nrows
hmargin = 0.0
#=========================================================
#  EXAMPLE 5
#=========================================================
      parameter_norain = dict()

      while ( ctime <= etime )  :

         print( ctime )

         print ( 'Reading data')

         #=========================================================
         #  READ THE DATA
         #=========================================================

         my_file=basedir + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/' + file_name +'.grd'

         #Read all the variables and levels at once
         tmp_parameter = ctlr.read_data_grads(  my_file , ctl_dict ) 

         my_file=basedir + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/moment0001.grd'       
 
         ensemble_mean = ctlr.read_data_grads(  my_file , ctl_dict )
 
         #Replace undef values with NaN for every variable in the dictionary.
         for var in ensemble_mean :
            ensemble_mean[ var ][ ensemble_mean[ var ] == undef ] = np.nan

         tmp_max_dbz = np.nanmax( ensemble_mean['dbz'] , 2 )

         for kk in range(0,nz)   :
            max_dbz[:,:,kk] = np.squeeze( tmp_max_dbz[:] )

         #Several masks can be defined here.

         rain_mask = np.logical_and( max_dbz > rain_threshold , radar_mask )
#=========================================================

lat_radar = 34.823
lon_radar = 135.523
radar_range = 60.0e3  #Radar range in meters (to define the radar mask)

#=========================================================
#  READ LAT LON AND TOPO
#=========================================================

latlon_file = basedir + '/LE_D1_1km_5min/latlon/latlon.grd'
latlon_ctl = basedir + '/LE_D1_1km_5min/latlon/latlon.ctl'

ctl_dict = ctlr.read_ctl(latlon_ctl)

my_data = ctlr.read_data_grads(latlon_file, ctl=ctl_dict)

lon = np.squeeze(my_data['glon'])
lat = np.squeeze(my_data['glat'])
topo = np.squeeze(my_data['topo'])

#Exclude areas outside the radar domain.
radar_mask = cmf.distance_range_mask(lon_radar, lat_radar, radar_range, lon,
                                     lat)
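
#For reference, a minimal great-circle (haversine) version of the same mask.
#This is a sketch assuming a spherical Earth; cmf.distance_range_mask may
#differ in its exact formula. radar_mask_haversine is a hypothetical name.
Rearth = 6.371e6  #Mean Earth radius in meters.
dlon = np.radians(lon - lon_radar)
dlat = np.radians(lat - lat_radar)
a = (np.sin(dlat / 2.0)**2 + np.cos(np.radians(lat_radar)) *
     np.cos(np.radians(lat)) * np.sin(dlon / 2.0)**2)
radar_mask_haversine = 2.0 * Rearth * np.arcsin(np.sqrt(a)) <= radar_range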

#=========================================================================================
#Plot the mean topography and PAWR radar range
#=========================================================================================

xtick = [134.0, 134.5, 135, 135.5, 136, 136.5, 137, 137.5]
ytick = [33.0, 33.5, 34, 34.5, 35, 35.5, 36.0, 36.5]
#=========================================================
#  EXAMPLE 7
#=========================================================
    rmse[my_exp] = cf.gaussian_filter(field0=rmse[my_exp],
                                      dx=1.0,
                                      sigma=sigma_smooth,
                                      nx=nx,
                                      ny=ny,
                                      undef=undef)
    rmse[my_exp][rmse[my_exp] == undef] = np.nan

    print('RMSE values')
    print(my_exp)
    print(np.nanmin(rmse[my_exp]), np.nanmax(rmse[my_exp]))

    my_file = basedir + '/' + my_exp + '/time_mean/' + filetype + '/moment0001_mean.grd'

    ensemble_mean[my_exp] = np.delete(
        ctlr.read_data_grads(my_file, ctl_dict_2)['dbz'], 4, 2)
    ensemble_mean[my_exp] = np.max(ensemble_mean[my_exp], 2)
    ensemble_mean[my_exp] = cf.gaussian_filter(field0=ensemble_mean[my_exp],
                                               dx=1.0,
                                               sigma=sigma_smooth,
                                               nx=nx,
                                               ny=ny,
                                               undef=undef)
    ensemble_mean[my_exp][ensemble_mean[my_exp] == undef] = np.nan

    print('State variable values')
    print(my_exp)
    print(np.nanmin(ensemble_mean[my_exp]), np.nanmax(ensemble_mean[my_exp]))

print(' Finish the loop over experiments ')
#=========================================================
#  EXAMPLE 8
#=========================================================
    print(ctime)

    print('Reading the moments and computing growth rate ')

    #=========================================================
    #  READ THE DATA
    #=========================================================

    ptime = ctime - delta_data

    #Analysis kld at time T - 1
    my_file = basedir + expname + ptime.strftime(
        "%Y%m%d%H%M%S") + '/guesgp/kldistance.grd'

    kld_p = ctlr.read_data_grads(my_file,
                                 ctl_dict,
                                 masked=False,
                                 undef2nan=True)

    #Gues kld at time T
    my_file = basedir + expname + ctime.strftime(
        "%Y%m%d%H%M%S") + '/guesgp/kldistance.grd'

    kld_c = ctlr.read_data_grads(my_file,
                                 ctl_dict,
                                 masked=False,
                                 undef2nan=True)

    kldgr = dict()
    sprdgr = dict()
    #Compute KLD growth
    for my_var in plot_variables:
#=========================================================
#  EXAMPLE 9
#=========================================================
        hist_analysis = chf.read_histogram(hist_file,
                                           max_file,
                                           min_file,
                                           nx,
                                           ny,
                                           nbins,
                                           ctl_dict,
                                           dtypein='i2')

        # hist_properties=analyze_histogram_fun( my_hist , thresholdmin )

        my_file = basedir + expname + ctime.strftime(
            "%Y%m%d%H%M%S") + '/guesgp/moment0001.grd'
        ens_mean_forecast = ctlr.read_data_grads(my_file,
                                                 ctl_dict,
                                                 masked=False,
                                                 undef2nan=True)
        my_file = basedir + expname + ctime.strftime(
            "%Y%m%d%H%M%S") + '/analgp/moment0001.grd'
        ens_mean_analysis = ctlr.read_data_grads(my_file,
                                                 ctl_dict,
                                                 masked=False,
                                                 undef2nan=True)

        my_file = basedir + expname + ctime.strftime(
            "%Y%m%d%H%M%S") + '/guesgp/moment0002.grd'
        ens_var_forecast = ctlr.read_data_grads(my_file,
                                                ctl_dict,
                                                masked=False,
                                                undef2nan=True)
        my_file = basedir + expname + ctime.strftime(
            "%Y%m%d%H%M%S") + '/analgp/moment0002.grd'
        ens_var_analysis = ctlr.read_data_grads(my_file,
                                                ctl_dict,
                                                masked=False,
                                                undef2nan=True)
         #=========================================================
         #  READ THE DATA
         #=========================================================

         my_file=basedir + '/' + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/' + filetype + '/update_comp_meandiff_vobs_' + var_obs + '_upd_obs_' + var_upd + '_ens_size_' + str(nbv) + '_obs_inc_' + str(obs_increment[iv]) + '.grd'

         #Read all the variables and levels at once
         parameter = ctlr.read_data(  my_file , ctl_dict , undef2nan = True ) 

         my_file=basedir + '/' + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/' + filetype + '/update_comp_kld_vobs_' + var_obs + '_upd_obs_' + var_upd + '_ens_size_' + str(nbv) + '_obs_inc_' + str(obs_increment[iv]) + '.grd'

         #Read all the variables and levels at once
         kld = ctlr.read_data(  my_file , ctl_dict , undef2nan = True )

         my_file=basedir + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/guesgp/moment0001.grd'       

         max_dbz =  np.squeeze( np.nanmax( ctlr.read_data_grads(  my_file , ctl_dict_2 , undef2nan = True )['dbz']  , 2 ) )

         #Several masks can be defined here.

         rain_mask = np.logical_and( max_dbz > rain_threshold , radar_mask )

         norain_mask = np.logical_and( max_dbz < norain_threshold , radar_mask )


         for kk in range( 0 , nz ) :

            nan_mask = np.logical_not( np.isnan( np.squeeze( parameter[:,:,kk] ) ) ) 
 
            tmp_rain_mask = np.logical_and( nan_mask , rain_mask )
            tmp_norain_mask = np.logical_and( nan_mask , norain_mask )
#=========================================================
#  EXAMPLE 11
#=========================================================
      it=0
      ctime = itime
      while ( ctime <= etime )  :

         print( ctime )

         print ( 'Reading data')

         #=========================================================
         #  OBTAIN THE RAIN MASK
         #=========================================================

         my_file=basedir + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/guesgp/moment0001.grd'

         #Read all the variables and levels at once
         ensemble_mean = ctlr.read_data_grads(  my_file , ctl_dict_2 , undef2nan = True ) 

         max_dbz = np.squeeze( np.nanmax( np.delete( ensemble_mean['dbz'] , 4 , 2) , 2 ) )

         rain_mask = np.logical_and( max_dbz > rain_threshold , radar_mask )

         norain_mask = np.logical_and( max_dbz < norain_threshold , radar_mask )

         #=========================================================
         #  READ THE DATA
         #=========================================================

         my_file=basedir + '/' + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/guesgp/update_comp_meandiff_vobs_' + var_obs + '_upd_obs_' + var_upd + '_ens_size_' + str(nbv) + '_obs_inc_' + str(obs_increment[iv]) + '.grd'
         update_mean_diff = np.squeeze( ctlr.read_data(  my_file , ctl_dict , undef2nan = True ) )
         my_file=basedir + '/' + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/guesgp/update_comp_updated_mean_kf_vobs_' + var_obs + '_upd_obs_' + var_upd + '_ens_size_' + str(nbv) + '_obs_inc_' + str(obs_increment[iv]) + '.grd'
         update_mean_kf = np.squeeze( ctlr.read_data(  my_file , ctl_dict , undef2nan = True ) )
#=========================================================
#  EXAMPLE 12
#=========================================================
exps = ['LE_D1_1km_5min']

lat_radar = 34.823
lon_radar = 135.523
radar_range = 60.0e3  #Radar range in meters (to define the radar mask)

#=========================================================
#  READ LAT LON
#=========================================================

latlon_file = basedir + '/LE_D1_1km_5min/latlon/latlon.grd'
latlon_ctl = basedir + '/LE_D1_1km_5min/latlon/latlon.ctl'

ctl_dict = ctlr.read_ctl(latlon_ctl)

my_data = ctlr.read_data_grads(latlon_file, ctl=ctl_dict)

lon = np.squeeze(my_data['glon'])
lat = np.squeeze(my_data['glat'])
topo = np.squeeze(my_data['topo'])

#Exclude areas outside the radar domain.
radar_mask = cmf.distance_range_mask(lon_radar, lat_radar, radar_range, lon,
                                     lat)

#=========================================================================================
#Plot the mean topography and PAWR radar range
#=========================================================================================

xtick = [134.0, 134.5, 135, 135.5, 136, 136.5, 137, 137.5]
ytick = [33.0, 33.5, 34, 34.5, 35, 35.5, 36.0, 36.5]
#=========================================================
#  EXAMPLE 13
#=========================================================
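            #Round the time difference to the nearest multiple of this
            #experiment's output interval.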
            time_diff = np.round(
                time_diff /
                delta[iexp].total_seconds()) * delta[iexp].total_seconds()

            my_time = itime + dt.timedelta(seconds=time_diff)

            print(my_exp, my_time.strftime("%Y%m%d%H%M%S"),
                  ctime.strftime("%Y%m%d%H%M%S"))

            #Read KLD
            my_file = basedir + my_exp + '/' + my_time.strftime(
                "%Y%m%d%H%M%S") + '/' + my_file_type + '/kldistance.grd'
            print(my_file)

            my_kld = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

            for var in plot_variables:
                if var in my_kld:
                    kld[var][:, :, :, iexp] = np.squeeze(my_kld[var])

            my_file = basedir + my_exp + '/' + my_time.strftime(
                "%Y%m%d%H%M%S") + '/' + my_file_type + '/moment0001.grd'
            my_mean = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

            #Compute max_dbz (we will use this to identify areas associated with clouds and convection)
            max_dbz[:, :, iexp] = np.squeeze(np.nanmax(my_mean['dbz'], 2))

            iexp = iexp + 1
        #=======================================================================================
        #Plot KLD
#=========================================================
#  EXAMPLE 14
#=========================================================
        kld[my_exp] = cf.gaussian_filter(field0=kld[my_exp],
                                         dx=1.0,
                                         sigma=sigma_smooth,
                                         nx=nx,
                                         ny=ny,
                                         undef=undef)
        kld[my_exp][kld[my_exp] == undef] = np.nan

        print('KLD values')
        print(my_exp)
        print(np.nanmin(kld[my_exp]), np.nanmax(kld[my_exp]))

        my_file = basedir + '/' + my_exp + '/time_mean/' + filetype + '/moment0001_mean.grd'

        ensemble_mean[my_exp] = np.delete(
            ctlr.read_data_grads(my_file, ctl_dict_2, undef2nan=False)['dbz'],
            4, 2)
        ensemble_mean[my_exp] = np.nanmax(ensemble_mean[my_exp], 2)
        ensemble_mean[my_exp] = cf.gaussian_filter(
            field0=ensemble_mean[my_exp],
            dx=1.0,
            sigma=sigma_smooth,
            nx=nx,
            ny=ny,
            undef=undef)
        ensemble_mean[my_exp][ensemble_mean[my_exp] == undef] = np.nan

        print('State variable values')
        print(my_exp)
        print(np.nanmin(ensemble_mean[my_exp]),
              np.nanmax(ensemble_mean[my_exp]))
#=========================================================
#  EXAMPLE 15
#=========================================================
    while (ctime <= etime):

        times[it] = it

        print(ctime)

        print('Reading the skew ')

        #=========================================================
        #  READ THE DATA
        #=========================================================

        my_file = basedir + expname + ctime.strftime(
            "%Y%m%d%H%M%S") + '/' + my_file_type + '/moment0003.grd'

        skew = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

        my_file = basedir + expname + ctime.strftime(
            "%Y%m%d%H%M%S") + '/' + my_file_type + '/moment0002.grd'

        variance = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

        #Compute the skewness using the 3rd and 2nd order moments.
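        #skewness = m3 / m2**(3/2): the third central moment divided by the
        #variance raised to the 3/2 power; zero-variance points become NaN.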
        for var in skew:
            my_mask = np.logical_and(np.logical_not(skew[var] == undef),
                                     np.logical_not(variance[var] == 0))
            skew[var][my_mask] = skew[var][my_mask] / np.power(
                variance[var][my_mask], 3 / 2)

            skew[var][np.logical_not(my_mask)] = np.nan
  while ( ctime <= etime ):

    print( ctime )

    print ( 'Reading the histogram ')

    hist_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/histogram.grd'
    max_file =basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/ensmax.grd'
    min_file =basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/ensmin.grd'

    hist=chf.read_histogram(hist_file,max_file,min_file,nx,ny,nbins,ctl_dict,dtypein='i2')

    # hist_properties=analyze_histogram_fun( my_hist , thresholdmin )
   
    my_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/moment0001.grd'
    ens_mean=ctlr.read_data_grads(my_file,ctl_dict,masked=False)
    my_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/moment0002.grd'
    ens_var=ctlr.read_data_grads(my_file,ctl_dict,masked=False)
    #my_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/moment0003.grd'
    #ens_skew=ctlr.read_data_grads(my_file,ctl_dict,masked=False)
    #my_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/moment0004.grd'
    #ens_kurt=ctlr.read_data_grads(my_file,ctl_dict,masked=False)
    my_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/kldistance.grd'
    ens_kld=ctlr.read_data_grads(my_file,ctl_dict,masked=False)


    #Get the location of the maximum kld for the selected variables.
    for var in plot_variables    :
       if var in ens_kld     :
         if ( np.ndim( ens_kld[var] ) >= 3 )  :
            nz = np.shape(ens_kld[var])[2]
#=========================================================
#  EXAMPLE 17
#=========================================================
      parameter_norain = dict()

      while ( ctime <= etime )  :

         print( ctime )

         print ( 'Reading data')

         #=========================================================
         #  READ THE DATA
         #=========================================================

         my_file=basedir + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/' + file_name +'.grd'

         #Read all the variables and levels at once
         tmp_parameter = ctlr.read_data_grads(  my_file , ctl_dict , undef2nan = True ) 

         my_file=basedir + my_exp + '/' + ctime.strftime("%Y%m%d%H%M%S") + '/'+ my_file_type + '/moment0001.grd'     

         ensemble_mean = ctlr.read_data_grads(  my_file , ctl_dict , undef2nan = True )

         tmp_max_dbz = np.nanmax( ensemble_mean['dbz'] , 2 )
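
         #Replicate the column-maximum reflectivity on every model level so it
         #can be combined with the 3-D rain/no-rain masks below.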


         for kk in range(0,nz)   :
            max_dbz[:,:,kk] = np.squeeze( tmp_max_dbz[:] )

         #Several masks can be defined here.

         rain_mask = np.logical_and( max_dbz > rain_threshold , radar_mask )
        print(ctime)

        print('Reading the moments and computing growth rate ')

        #=========================================================
        #  READ THE DATA
        #=========================================================

        ptime = ctime - delta

        #Analysis KLD at time T - 1
        my_file = basedir + expname + ptime.strftime(
            "%Y%m%d%H%M%S") + '/' + my_file_type + '/kldistance.grd'

        kld_p = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

        #Gues KLD at time T
        my_file = basedir + expname + ctime.strftime(
            "%Y%m%d%H%M%S") + '/' + my_file_type + '/kldistance.grd'

        kld_c = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

        #Remove undef values, compute the KLD growth,
        #and smooth the result.
        for my_var in kld_c:
            if my_var in plot_variables:
                kld_c[my_var][kld_c[my_var] == undef] = 0  #np.nan
                kld_p[my_var][kld_p[my_var] == undef] = 0  #np.nan
                kld_c[my_var][kld_c[my_var] > 100] = 0  #np.nan
                kld_p[my_var][kld_p[my_var] > 100] = 0  #np.nan
#=========================================================
#  EXAMPLE 19
#=========================================================
    if iexp == 1:
        my_title_2 = '30SEC'

    irow = iexp

    mask = cmf.box_mask(cen_lon[0] - buffer_size, cen_lon[0] + buffer_size,
                        ini_lat[0], end_lat[0], lon, lat)

    #=========================================================
    #  READ
    #=========================================================

    my_file = basedir + expnames[iexp] + ctimes[0].strftime(
        "%Y%m%d%H%M%S") + '/' + filetypes[0] + '/moment0001.grd'
    m01 = ctlr.read_data_grads(my_file, ctl_dict, masked=False, undef2nan=True)
    my_file = basedir + expnames[iexp] + ctimes[0].strftime(
        "%Y%m%d%H%M%S") + '/' + filetypes[0] + '/moment0002.grd'
    m02 = ctlr.read_data_grads(my_file, ctl_dict, masked=False, undef2nan=True)
    my_file = basedir + expnames[iexp] + ctimes[0].strftime(
        "%Y%m%d%H%M%S") + '/' + filetypes[0] + '/kldistance.grd'
    kld = ctlr.read_data_grads(my_file, ctl_dict, masked=False, undef2nan=True)

    #Compute the variables that we want to plot over the cross section.
    for ivar, my_var in enumerate(variables):

        icol = ivar

        var_mean = np.squeeze(np.delete(m01[my_var], 4, 2))
        var_kld = np.squeeze(np.delete(kld[my_var], 4, 2))
        var_var = np.sqrt(np.squeeze(np.delete(m02[my_var], 4, 2)))
    it = 0
    while (ctime <= etime):

        print(ctime)

        print('Reading the bimodality ')

        #=========================================================
        #  READ THE DATA
        #=========================================================

        my_file = basedir + expname + ctime.strftime(
            "%Y%m%d%H%M%S") + '/' + my_file_type + '/bimodality_index.grd'

        bimodality = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

        max_bimodality = dict()

        for my_var in bimodality:
            #Filter unreliable bimodality estimates.
            bimodality[my_var][bimodality[my_var] > 100] = np.nan
            #Filter undef bimodality values.
            bimodality[my_var][bimodality[my_var] == undef] = np.nan

            #Use the radar mask to define a masked array.
            tmp_max_bimodality = np.squeeze(np.nanmax(bimodality[my_var], 2))
            tmp_mask = np.logical_or(tmp_max_bimodality < min_bimodality_value,
                                     np.logical_not(radar_mask))
    hist_file = file_path + 'histogram.grd'
    max_file = file_path + 'ensmax.grd'
    min_file = file_path + 'ensmin.grd'
    mean_file = file_path + 'moment0001.grd'
    var_file = file_path + 'moment0002.grd'
    bim_file = file_path + 'bimodality_index.grd'
    kld_file = file_path + 'kldistance.grd'
    hist = chf.read_histogram(hist_file,
                              max_file,
                              min_file,
                              nx,
                              ny,
                              nbins,
                              ctl_dict,
                              dtypein='i2')
    ens_mean = ctlr.read_data_grads(mean_file, ctl_dict, masked=False)
    ens_var = ctlr.read_data_grads(var_file, ctl_dict, masked=False)
    ens_bim = ctlr.read_data_grads(bim_file, ctl_dict, masked=False)
    ens_kld = ctlr.read_data_grads(kld_file, ctl_dict, masked=False)

    for ivar, my_var in enumerate(varlist):

        #Get the histogram limits for the corresponding maximum bimodality index location.
        hist_max = np.squeeze(hist[my_var]['maxval'][ix, iy, iz])
        hist_min = np.squeeze(hist[my_var]['minval'][ix, iy, iz])
        if ivar == 1:
            hist_max = 1000 * hist_max
            hist_min = 1000 * hist_min

        hist_delta = (hist_max - hist_min) / nbins
        hist_range = hist_min + hist_delta / 2 + hist_delta * np.arange(nbins)
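        #hist_range holds the nbins bin-center values between hist_min and hist_max.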