Example #1
    kld_c = ctlr.read_data_grads(my_file,
                                 ctl_dict,
                                 masked=False,
                                 undef2nan=True)
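    # Assumption: read_data_grads is taken to return a dict mapping each
    # variable name in the GrADS ctl file to a NumPy array, with undefined
    # values converted to NaN when undef2nan=True; the loops below rely on this.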

    kldgr = dict()
    sprdgr = dict()
    #Compute KLD growth
    for my_var in plot_variables:
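        # Vertical mean of the KLD change: drop level index 4 along axis 2 with
        # np.delete, average the rest while ignoring NaNs, then divide by the
        # time between the two files to get a per-second growth rate.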
        kldgr[my_var] = np.squeeze(
            np.nanmean(np.delete(kld_c[my_var] - kld_p[my_var], 4, 2),
                       2)) / delta_data.total_seconds()

        #Smooth the growth rate.
        kldgr[my_var] = csf.gaussian_smooth_2d(kldgr[my_var], 2.0, 5.0)

    #Guess spread at the previous time (ptime)
    my_file = basedir + expname + ptime.strftime(
        "%Y%m%d%H%M%S") + '/guesgp/moment0002.grd'

    sprd_p = ctlr.read_data_grads(my_file,
                                  ctl_dict,
                                  masked=False,
                                  undef2nan=True)

    #Guess spread at the current time (ctime)
    my_file = basedir + expname + ctime.strftime(
        "%Y%m%d%H%M%S") + '/guesgp/moment0002.grd'

    sprd_c = ctlr.read_data_grads(my_file,
                                  ctl_dict,
                                  masked=False,
                                  undef2nan=True)

Example #2

        kld_c = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

        #Remove undef values, compute the KLD growth,
        #and smooth the result.
        for my_var in kld_c:
            if my_var in plot_variables:
                kld_c[my_var][kld_c[my_var] == undef] = 0  #np.nan
                kld_p[my_var][kld_p[my_var] == undef] = 0  #np.nan
                kld_c[my_var][kld_c[my_var] > 100] = 0  #np.nan
                kld_p[my_var][kld_p[my_var] > 100] = 0  #np.nan
                kldgr[my_var] = (kld_c[my_var] -
                                 kld_p[my_var]) / delta.total_seconds()

                #Smooth the growth rate.
                kldgr[my_var][:, :, :, 0] = csf.gaussian_smooth_2d(
                    kldgr[my_var], 2.0, 5.0)

        #Read the ensemble mean to get the information from the storm location.
        my_file = basedir + expname + ctime.strftime(
            "%Y%m%d%H%M%S") + '/' + my_file_type + '/' + '/moment0001.grd'

        ens_mean = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

        #Compute max_dbz (we will use this to identify areas associated with clouds and convection)
        tmp_max_dbz = np.squeeze(np.nanmax(ens_mean['dbz'], 2))

        for ilev in range(0, nlev):  #Create a fake 3D array for the max_dbz
            #This is because the plotting function expects a 3D array as input.
            max_dbz[:, :, ilev] = tmp_max_dbz
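        # Added note, assuming max_dbz has shape (ny, nx, nlev): the loop above
        # is equivalent to broadcasting the 2-D composite along a new vertical
        # axis, e.g. max_dbz = np.repeat(tmp_max_dbz[:, :, np.newaxis], nlev, axis=2)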

        #=======================================================================================
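
The examples in this listing share the same core step: read the KLD field at two
consecutive times, difference the fields, convert the difference to a per-second
growth rate (averaging over the vertical in Examples #1 and #3), and smooth the
result horizontally. The code below is a minimal, self-contained sketch of that
step on synthetic data, not the project code: the grid size, the 600-second cycle
length, and scipy.ndimage.gaussian_filter as a stand-in for csf.gaussian_smooth_2d
are all assumptions.

    import numpy as np
    from datetime import timedelta
    from scipy.ndimage import gaussian_filter

    ny, nx, nlev = 60, 60, 10                    # assumed grid size
    rng = np.random.default_rng(0)

    # Synthetic KLD fields at the previous (p) and current (c) times,
    # with one NaN standing in for an undefined grid point.
    kld_p = rng.random((ny, nx, nlev))
    kld_c = kld_p + 0.01 * rng.standard_normal((ny, nx, nlev))
    kld_c[0, 0, 0] = np.nan

    delta_data = timedelta(seconds=600)          # assumed cycle length

    # Per-second growth rate, averaged over the vertical axis while ignoring
    # NaNs (the examples also drop one level with np.delete before averaging).
    kldgr = np.nanmean(kld_c - kld_p, axis=2) / delta_data.total_seconds()

    # Horizontal smoothing; gaussian_filter (sigma in grid points) stands in
    # for the project's csf.gaussian_smooth_2d(field, 2.0, 5.0).
    kldgr_smooth = gaussian_filter(kldgr, sigma=2.0)

    print(kldgr_smooth.shape)                    # (60, 60)
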
Example #3
            corrcoef_norain[my_exp] = dict()

            for my_var in plot_variables:
                corrcoef_rain[my_exp][my_var] = np.zeros(ntimes)
                corrcoef_norain[my_exp][my_var] = np.zeros(ntimes)

        kldgr = dict()
        sprdgr = dict()
        #Compute KLD growth
        for my_var in plot_variables:
            kld_c[my_var][kld_c[my_var] == undef] = np.nan
            kld_p[my_var][kld_p[my_var] == undef] = np.nan
            kldgr[my_var] = np.squeeze(
                np.nanmean(np.delete(kld_c[my_var] - kld_p[my_var], 4, 2),
                           2)) / delta_data.seconds
            kldgr[my_var] = csf.gaussian_smooth_2d(kldgr[my_var], 2.0, 5.0)
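            # Note: timedelta.seconds returns only the seconds component of the
            # interval (0..86399); for cycles of a day or more, total_seconds()
            # (used in the other examples) would be needed here.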

        my_file = basedir + '/' + my_exp + '/' + ptime.strftime(
            "%Y%m%d%H%M%S") + '/analgp/moment0002.grd'

        sprd_p = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

        my_file = basedir + '/' + my_exp + '/' + ctime.strftime(
            "%Y%m%d%H%M%S") + '/guesgp/moment0002.grd'

        sprd_c = ctlr.read_data_grads(my_file, ctl_dict, masked=False)

        for my_var in plot_variables:
            sprd_c[my_var][sprd_c[my_var] == undef] = np.nan
            sprd_p[my_var][sprd_p[my_var] == undef] = np.nan
            sprdgr[my_var] = np.squeeze(