Example #1
import numpy as np

import nc_particles as ncp  #NOAA GNOME-tools particle-file reader; package path may differ.

def load_particles(particle_dataset):
    """
    Load a netCDF4 dataset of particles and return it. Close the dataset when
    done.

    :param dataset: open readable NetCDF dataset
    :type dataset: netCDF.Dataset
    :return: (
                x coordinates of particles (longitude),
                y coordinates of particles (latitude),
                z coordinates of particles in meters,
                ages of particles in seconds,
                times converted to datetime objects,
                original time units,
                original time base date,
            )
    :rtype: tuple of (
                TxN array,
                TxN array,
                TxN array,
                TxN array,
                T array of datetime objects,
                str,
                array,
            )

    Where T is the number of time steps and N is the number of particles.
  
"""

    ncparticle = ncp.nc_particle_file(particle_dataset)

    NumLE = ncparticle.particle_count[:]
    times = ncparticle.times[:]
    NumTimeSteps = len(times)
    data = ncparticle.get_all_timesteps(
        ['longitude', 'latitude', 'depth', 'mass', 'age'])
    #print data.keys()

    #Check whether the number of particles remains constant in each time-step
    #(instantaneous release) or changes over time (continuous release) (Zelenke).
    if len(np.unique(NumLE)) == 1:

        #Reshape LE time-series vectors into rectangular matrices (NumTimeSteps x NumLE).
        lon = data['longitude'].reshape((NumTimeSteps, -1))
        lat = data['latitude'].reshape((NumTimeSteps, -1))
        depth = data['depth'].reshape((NumTimeSteps, -1))
        mass = data['mass'].reshape((NumTimeSteps, -1))
        age = data['age'].reshape((NumTimeSteps, -1))

    else:

        #Put the varying number of LEs in each time-step into uniformly sized
        #vectors to allow reshaping into rectangular matrices (NumTimeSteps x NumLE).

        MaxNumLE = np.max(NumLE)
        #Preallocate NaN-filled vectors for the subsequent loop.
        lons = np.full(MaxNumLE * NumTimeSteps, np.nan,
                       dtype=data['longitude'].dtype)
        lats = np.full(MaxNumLE * NumTimeSteps, np.nan,
                       dtype=data['latitude'].dtype)
        depths = np.full(MaxNumLE * NumTimeSteps, np.nan,
                         dtype=data['depth'].dtype)
        masses = np.full(MaxNumLE * NumTimeSteps, np.nan,
                         dtype=data['mass'].dtype)
        ages = np.copy(masses)

        CumSumLE = np.append(0, np.cumsum(NumLE))
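        #CumSumLE[i]:CumSumLE[i+1] brackets time-step i's particles in the flat
        #arrays; e.g. NumLE == [2, 3] gives CumSumLE == [0, 2, 5].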
        for i in range(NumTimeSteps):
            #Put however many LEs there were in each time-step into that
            #time-step's (uniformly sized) portion of the vector. Leave the
            #rest of the chunk populated with NaNs if there aren't enough LEs
            #in that particular time-step to fill up the chunk.
            lons[i * MaxNumLE:(i * MaxNumLE) +
                 NumLE[i]] = data['longitude'][CumSumLE[i]:CumSumLE[i + 1]]
            lats[i * MaxNumLE:(i * MaxNumLE) +
                 NumLE[i]] = data['latitude'][CumSumLE[i]:CumSumLE[i + 1]]
            depths[i * MaxNumLE:(i * MaxNumLE) +
                   NumLE[i]] = data['depth'][CumSumLE[i]:CumSumLE[i + 1]]
            masses[i * MaxNumLE:(i * MaxNumLE) +
                   NumLE[i]] = data['mass'][CumSumLE[i]:CumSumLE[i + 1]]
            ages[i * MaxNumLE:(i * MaxNumLE) +
                 NumLE[i]] = data['age'][CumSumLE[i]:CumSumLE[i + 1]]
        #Reshape (NumTimeSteps x NumLE).
        lon = lons.reshape((NumTimeSteps, -1))
        lat = lats.reshape((NumTimeSteps, -1))
        depth = depths.reshape((NumTimeSteps, -1))
        mass = masses.reshape((NumTimeSteps, -1))
        age = ages.reshape((NumTimeSteps, -1))

    time_units = ncparticle.time_units
    mass_units = ncparticle.mass_units  #Also return the units of mass specified in the NetCDF file (Zelenke).

    return (
        lon,
        lat,
        depth,
        age,
        times,
        time_units,
        mass,
        mass_units,
    )
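
A minimal usage sketch for load_particles; the file name "particles.nc" is a
placeholder, and the statistics shown are illustrative rather than part of the
original example:

def example_usage():
    #Unpack the eight return values in the order documented above.
    (lon, lat, depth, age,
     times, time_units, mass, mass_units) = load_particles("particles.nc")

    num_steps, num_particles = lon.shape  #All particle arrays are T x N.
    print("time steps:", num_steps, "particles per step:", num_particles)
    print("time units:", time_units, "mass units:", mass_units)

    #For continuous releases the arrays are NaN-padded, so use the
    #NaN-ignoring reductions when summarizing them.
    print("mean depth at final step:", np.nanmean(depth[-1]))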
Example #2
import time

import numpy as np

import nc_particles  #NOAA GNOME-tools particle-file reader; package path may differ.
from tap_comp_volume import comp_volume  #Assumed source of comp_volume; adjust to the actual module.

def CompThicknessCube(FileList, OutputTimes, Grid, Weather=None):
    """
    CompThicknessCube computes the average thickness of the oil over
    each receptor site. It only works for grid receptors.
    
    Filelist is a list of netcdf file names: one for each trajectory
    
    OutputTimes is a sequence of output times, in hours, from the beginning of the run.

    Grid is a Grid object, specifying the grid parameters
    
    If Weather is not None it must be a tap_comp_volume.weather_curve object

    If Weather is None, then there is no change in volume.
        
    """

    ## Read the header of the first trajectory file. It is assumed that
    ## the others will match; no check is made to ensure this, but the
    ## code will crash if anything is very wrong.
    #(junk, junk, HeaderData, junk) = ReadTrajectory(FileList[0],2)

    # read the trajectory data from the first netCDF file
    #print("getting header info from file:", FileList[0])
    print("nc_particles module:", nc_particles.__file__)
    traj_file = nc_particles.nc_particle_file(FileList[0])

    if traj_file.get_units('age') != 'seconds':
        raise ValueError(
            "particle age units in netcdf file must be in seconds")

    NumTimesteps = traj_file.num_times
    MaxNumLEs = traj_file.particle_count[:].max()

    TimeStep = traj_file.times[1] - traj_file.times[0]  # assume a constant time step!
    traj_file.close()
    TimeStepHours = TimeStep.total_seconds() / 3600.00
    ## OutputTimes should already be in hours
    OutputSteps = (np.array([0] + OutputTimes) / TimeStepHours).astype(
        np.int32)  # in integer units of time step
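    # e.g. with a 15-minute trajectory time step (TimeStepHours == 0.25) and
    # OutputTimes == [6, 12], OutputSteps == [0, 24, 48].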
    # Allocate the Cube
    NumSpills = len(FileList)
    NumSites = Grid.num_cells
    NumTimes = len(OutputTimes)
    ## fixme: need to make this a float cube!
    Cube = np.zeros((NumTimes, NumSites, NumSpills), np.float32)

    start = time.time()  # just for timing how long it takes to run

    ## Loop through each individual trajectory
    for SpillNum in range(NumSpills):
        #print "computing spill number %i"%(SpillNum,)
        # read new trajectory file:
        #print "working with file:", FileList[SpillNum]
        traj_file = nc_particles.nc_particle_file(FileList[SpillNum])
        VolTable = np.zeros(NumSites, np.float32)  # stores the maximum volume seen in each grid box

        ## Step through the Cube output time steps
        for step in range(len(OutputSteps) - 1):
            ## step through the Trajectory time steps between each Cube time step
            for t in range(OutputSteps[step], OutputSteps[step + 1]):
                LE_lat = traj_file.get_timestep_single_var(t, 'latitude')
                LE_long = traj_file.get_timestep_single_var(t, 'longitude')
                LE_positions = np.column_stack((LE_long, LE_lat))
                NumLEs = LE_positions.shape[0]
                LE_age = traj_file.get_timestep_single_var(t, 'age').astype(
                    np.float32) / 3600.00  # age needs to be in hours
                #print "age:", LE_age
                #NOTE: for TAP -- we assume that all the particles have unit
                #      mass at the start, so we don't read it from the file
                LE_mass = np.ones((NumLEs, ), dtype=np.float32)
                #print "before"
                #print LE_mass
                if Weather:
                    #print "weathering the LEs"
                    LE_mass = Weather.weather(LE_mass, LE_age)
                flags = traj_file.get_timestep_single_var(t, 'flag').astype(
                    np.uint8)
                Vol = comp_volume(LE_positions, LE_mass, flags, Grid)
                # keep the largest volume computed between output timesteps
                VolTable = np.maximum(Vol.flat, VolTable)
            ## put the max volume in the Cube at this Cube time step
            #Cube[step,:,SpillNum] = transform(VolTable, MaxNumLEs)
            Cube[step, :, SpillNum] = VolTable
        traj_file.close()
    #print "cube took %s seconds to generate"%(time.time() - start)
    return Cube
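
The nested loops above reduce each trajectory to one value per site per output
interval by keeping a running maximum of the per-cell volumes. A self-contained
sketch of that aggregation pattern, with a plain array standing in for the
comp_volume results:

def running_max_per_interval(volumes, output_steps):
    #volumes: (num_trajectory_steps, num_sites) array, one row per time step.
    #output_steps: increasing step indices starting at 0, marking the interval
    #boundaries (the role OutputSteps plays above).
    num_intervals = len(output_steps) - 1
    out = np.zeros((num_intervals, volumes.shape[1]), np.float32)
    vol_table = np.zeros(volumes.shape[1], np.float32)
    for step in range(num_intervals):
        for t in range(output_steps[step], output_steps[step + 1]):
            #As in CompThicknessCube, the running maximum is not reset between
            #intervals, so each row holds the largest value seen so far.
            vol_table = np.maximum(volumes[t], vol_table)
        out[step] = vol_table
    return out

vols = np.random.rand(10, 4).astype(np.float32)  #10 time steps, 4 sites
print(running_max_per_interval(vols, [0, 5, 10]))  #shape (2, 4)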