# Module-level imports assumed by these excerpts; SAM, mc and var_calcs are
# project-local modules, and create_savefile/make_profiles are project helpers
# that are not shown here.
import cPickle

import numpy
from numpy import double
from netCDF4 import Dataset  # assumed netCDF reader


def main(filename):
    time_step = mc.time_picker(filename)

    # Load the data needed to compute the core, cloud, updraft, etc. fields at
    # the current time_step. QV/QN are converted from g/kg to kg/kg and
    # pressure from hPa to Pa.
    nc_file = Dataset(filename)

    tabs_field = nc_file.variables["TABS"][0, :].astype(double)
    qv_field = nc_file.variables["QV"][0, :].astype(double) / 1000.0
    qn_field = nc_file.variables["QN"][0, :].astype(double) / 1000.0
    p_field = nc_file.variables["p"][:].astype(double) * 100.0

    # Condensed (cloudy) points: any nonzero cloud condensate
    cloud_field = qn_field > 0.0

    # Virtual potential temperature; buoyant points exceed the horizontal mean
    # at each model level
    thetav_field = SAM.theta_v(p_field[:, numpy.newaxis, numpy.newaxis], tabs_field, qv_field, qn_field, 0.0)

    buoy_field = thetav_field > (thetav_field.mean(2).mean(1))[:, numpy.newaxis, numpy.newaxis]

    # Average the staggered horizontal velocities onto cell centres with a
    # periodic wrap; copy the first slice so the wrap uses the original
    # (unsummed) values rather than the already-summed ones.
    u_field = nc_file.variables["U"][0, :].astype(double)
    u_first = u_field[:, 0, :].copy()
    u_field[:, :-1, :] += u_field[:, 1:, :]
    u_field[:, -1, :] += u_first
    u_field = u_field / 2.0

    v_field = nc_file.variables["V"][0, :].astype(double)
    v_first = v_field[:, :, 0].copy()
    v_field[:, :, :-1] += v_field[:, :, 1:]
    v_field[:, :, -1] += v_first
    v_field = v_field / 2.0

    # Average W from the staggered (interface) levels onto cell centres; the
    # top level is left at its interface value.
    w_field = nc_file.variables["W"][0, :].astype(double)
    w_field[:-1, :, :] += w_field[1:, :, :]
    w_field[:-1, :, :] = w_field[:-1, :, :] / 2.0

    # Updraft points
    up_field = w_field > 0.0

    # Cloud core: points that are simultaneously in updraft, buoyant and cloudy
    core_field = up_field & buoy_field & cloud_field

    # Tracer field and grid coordinates used for the plume definition
    tr_field = nc_file.variables["TR01"][0, :].astype(double)
    x = nc_file.variables["x"][:].astype(double)
    y = nc_file.variables["y"][:].astype(double)
    z = nc_file.variables["z"][:].astype(double)

    nc_file.close()

    tr_mean = tr_field.reshape((len(z), len(y) * len(x))).mean(1)
    tr_stdev = numpy.sqrt(tr_field.reshape((len(z), len(y) * len(x))).var(1))
    tr_min = 0.05 * numpy.cumsum(tr_stdev) / (numpy.arange(len(tr_stdev)) + 1)
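    # Plume threshold: at each level the tracer must exceed the larger of
    # (level mean + one standard deviation) and tr_min, where tr_min is 5% of
    # the running mean of the standard-deviation profile.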

    #    plume_field = (tr_field > numpy.max(numpy.array([tr_mean + tr_stdev, tr_min]), 0)[:, numpy.newaxis, numpy.newaxis]) & up_field
    plume_field = tr_field > numpy.max(numpy.array([tr_mean + tr_stdev, tr_min]), 0)[:, numpy.newaxis, numpy.newaxis]

    save_file = Dataset("%s/tracking/cloudtracker_input_%08g.nc" % (mc.data_directory, time_step), "w")
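    # The file holds the boolean core/condensed/plume masks and the
    # cell-centred winds that cloudtracker reads as input.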

    save_file.createDimension("x", len(x))
    save_file.createDimension("y", len(y))
    save_file.createDimension("z", len(z))

    xvar = save_file.createVariable("x", "f", ("x",))
    yvar = save_file.createVariable("y", "f", ("y",))
    zvar = save_file.createVariable("z", "f", ("z",))

    corevar = save_file.createVariable("core", "i", ("z", "y", "x"))
    condvar = save_file.createVariable("condensed", "i", ("z", "y", "x"))
    plumevar = save_file.createVariable("plume", "i", ("z", "y", "x"))
    uvar = save_file.createVariable("u", "f", ("z", "y", "x"))
    vvar = save_file.createVariable("v", "f", ("z", "y", "x"))
    wvar = save_file.createVariable("w", "f", ("z", "y", "x"))

    xvar[:] = x[:]
    yvar[:] = y[:]
    zvar[:] = z[:]

    corevar[:] = core_field[:]
    condvar[:] = cloud_field[:]
    plumevar[:] = plume_field[:]
    uvar[:] = u_field[:]
    vvar[:] = v_field[:]
    wvar[:] = w_field[:]

    save_file.close()


def main(filename):
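    # Map each SAM output field below to the var_calcs routine that reduces it
    # to a per-cloud profile (assumed behaviour of create_savefile and
    # make_profiles, which are not shown here).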
    vars = {
          'ETETCLD': var_calcs.etetcld,
          'DTETCLD': var_calcs.dtetcld,
          'EQTETCLD': var_calcs.eqtetcld,
          'DQTETCLD': var_calcs.dqtetcld,
          'ETTETCLD': var_calcs.ettetcld,
          'DTTETCLD': var_calcs.dttetcld,
          'EWTETCLD': var_calcs.ewtetcld,
          'DWTETCLD': var_calcs.dwtetcld,
          'VTETCLD': var_calcs.vtetcld,
          'MFTETCLD': var_calcs.mftetcld,
    }
    
    # Automatically load time step from output file name
    time = mc.time_picker(filename)
    
    # Load CDF Files
    nc_file = Dataset(filename)
    stat_file = Dataset(mc.get_stat())

    # Reference profiles from the horizontally averaged statistics file;
    # pressure is converted from hPa to Pa.
    data = {'z': stat_file.variables['z'][:].astype(double),
            'RHO': stat_file.variables['RHO'][time, :].astype(double),
            'PRES': stat_file.variables['PRES'][time, :].astype(double) * 100.}
    stat_file.close()

    # Load the cloudtracker output for this time step: a dict of per-cloud
    # data keyed by cloud id
    cloud_filename = '../cloudtracker/pkl/cloud_data_%08d.pkl' % time
    clouds = cPickle.load(open(cloud_filename, 'rb'))
        
    ids = clouds.keys()
    ids.sort()
        
    data['ids'] = numpy.array(ids)
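    # 3-D fields at this time step, presumably reduced to per-cloud profiles by
    # make_profiles below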
    for name in ('ETETCLD', 'DTETCLD',
                 'EQTETCLD', 'DQTETCLD',
                 'ETTETCLD', 'DTTETCLD',
                 'EWTETCLD', 'DWTETCLD',
                 'VTETCLD', 'MFTETCLD'):
        data[name] = nc_file.variables[name][0, :].astype(numpy.double)
                
    # Create a save file and a set of profile variables for each sampling region
    savefiles = {}
    profiles = {}
    for item in ('condensed_entrain',):
        savefile, variables = create_savefile(time, data, vars, item)
        savefiles[item] = savefile
        profiles[item] = variables
        
    for n, id in enumerate(ids):
        print "time: ", time, " id: ", id
        # Select the current cloud id
        cloud = clouds[id]
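        # 'condensed_entrain' presumably combines the cloudy points with their
        # shell so that properties can be profiled across the cloud boundary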
        cloud['condensed_entrain'] = numpy.hstack([cloud['condensed'], cloud['condensed_shell']])

        make_profiles(profiles, cloud, vars, data, n)
            
    for savefile in savefiles.values():
        savefile.close()

    nc_file.close()

# Example #3

def main(filename):
    vars = {
          'AREA': var_calcs.area,
          'TABS': var_calcs.tabs,
          'QN': var_calcs.qn,
          'QV': var_calcs.qv,
          'QT': var_calcs.qt,
          'U': var_calcs.u,
          'V': var_calcs.v,
          'W': var_calcs.w,
          'THETAV': var_calcs.thetav,
          'THETAV_LAPSE': var_calcs.thetav_lapse,
          'THETAL': var_calcs.thetal,
          'MSE': var_calcs.mse,
          'RHO': var_calcs.rho,
          'PRES': var_calcs.press,
          'WQREYN': var_calcs.wqreyn,
          'WWREYN': var_calcs.wwreyn,
          'DWDZ': var_calcs.dw_dz,
          'DPDZ': var_calcs.dp_dz,
          'TR01': var_calcs.tr01,
    }
    
    # Automatically load time step from output file name
    time = mc.time_picker(filename)
    
    # Load CDF Files
    nc_file = Dataset(filename)
    stat_file = Dataset(mc.get_stat())

    # Grid height and reference pressure from the 3-D file, plus the mean
    # density profile from the statistics file
    data = {'z': nc_file.variables['z'][:].astype(double),
            'p': nc_file.variables['p'][:].astype(double),
            'RHO': stat_file.variables['RHO'][time, :].astype(double),
            }
    stat_file.close()
    
    # Load the cloudtracker output for this time step: a dict of per-cloud
    # data keyed by cloud id
    cloud_filename = '../cloudtracker/pkl/cloud_data_%08d.pkl' % time
    clouds = cPickle.load(open(cloud_filename, 'rb'))
       
    ids = clouds.keys()
    ids.sort()

    data['ids'] = numpy.array(ids)
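    # Full 3-D model fields at this time step for the var_calcs routines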
    for name in ('QV', 'QN', 'TABS', 'PP', 'U', 'V', 'W', 'TR01'):
        data[name] = nc_file.variables[name][0, :].astype(numpy.double)
                
    # Create a save file and a set of profile variables for each sampling region
    savefiles = {}
    profiles = {}
    for item in ('core', 'condensed', 'condensed_shell',
                 'condensed_edge', 'condensed_env',
                 'core_shell',
                 'core_edge', 'core_env',
                 'plume'):
        savefile, variables = create_savefile(time, data, vars, item)
        savefiles[item] = savefile
        profiles[item] = variables
        
    for n, id in enumerate(ids):
        print "time: ", time, " id: ", id
        # Select the current cloud id
        cloud = clouds[id]

        make_profiles(profiles, cloud, vars, data, n)
        
    for savefile in savefiles.values():
        savefile.close()

    nc_file.close()
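

# A minimal command-line driver (a sketch, not part of the original excerpts;
# each excerpt redefines main, so only the last definition would be live if
# this file were run as-is):
if __name__ == "__main__":
    import sys

    main(sys.argv[1])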