Code example #1
    def test_random_extents(self):

        nblocks = 100
        blocksize = 2000
        random_file = RandomWalkGenerator(
            'noName',
            nblocks=nblocks,
            blocksize=blocksize,
        )

        extents = random_file.get_surface_extents()

        ans = DT.extents(ndims=2,
                         prec=DT.PRECISION,
                         ll_default=(0, 0),
                         ur_default=(1, 1))

        assert_array_equal(ans['ll'], extents['ll'])
        assert_array_equal(ans['ur'], extents['ur'])

        extents = random_file.get_subsurface_extents()

        ans = DT.extents(ndims=3,
                         prec=DT.PRECISION,
                         ll_default=(0, 0, 0),
                         ur_default=(1, 1, 1))

        assert_array_equal(ans['ll'], extents['ll'])
        assert_array_equal(ans['ur'], extents['ur'])
Code example #2
    def get_extents(self, ndims, spillets=None):
        if ndims == 2:
            return DT.extents(ndims=2,
                              prec=DT.PRECISION,
                              ur_default=(1, 1),
                              ll_default=(0, 0))
        elif ndims == 3:
            return DT.extents(ndims=3,
                              prec=DT.PRECISION,
                              ur_default=(1, 1, 1),
                              ll_default=(0, 0, 0))
        else:
            raise PyStochIOError('Invalid number of dimensions selected.')
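The 'll'/'ur' indexing used throughout these examples suggests DT.extents returns a one-record numpy structured array holding lower-left and upper-right corner vectors. A minimal sketch of a compatible factory under that assumption; make_extents is a hypothetical stand-in, not the actual pystoch API:

import numpy


def make_extents(ndims, prec, ll_default=None, ur_default=None):
    # one record with lower-left ('ll') and upper-right ('ur') corner vectors
    dtype = numpy.dtype([('ll', prec, (ndims, )), ('ur', prec, (ndims, ))])
    result = numpy.zeros(1, dtype=dtype)
    if ll_default is not None:
        result['ll'][0] = ll_default
    if ur_default is not None:
        result['ur'][0] = ur_default
    return result


ext = make_extents(ndims=2, prec=numpy.float32,
                   ll_default=(0, 0), ur_default=(1, 1))
# ext['ll'][0] -> [ 0.  0.],  ext['ur'][0] -> [ 1.  1.]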
Code example #3
def add_product(ops_list, reduce_list, cleaner_list, grid_data,
                product_metadata):

    product_type = product_metadata[PRODUCT_TYPE]

    # allocate space for the gridded result and initialize with max int
    metadata = {'units': 'm', 'long_name': 'maximum on shore oil thickness'}
    # product_name is presumably a module-level constant, defined alongside
    # PRODUCT_TYPE in each product module
    reduce_array = grid_data.allocate(product_name,
                                      DT.SPRECISION,
                                      metadata=metadata)

    # Make a temporary array that will be used in the operation
    map_array = reduce_array.copy()

    # Calculate cell diagonal:
    cell_diagonal = grid_data._grid.cell_diagonal

    coroutine = max_shore_grid_thickness_op(map_array, cell_diagonal)

    # Append the operation coroutine to the list
    ops_list.append(coroutine)

    # use the array allocated in grid data as the result in the reduce method
    coroutine = max_reduce(reduce_array)

    # append the reduce coroutine and the argument it will be passed to the reduce list
    reduce_list.append((coroutine, map_array))

    # Now add a reset value for the map array
    cleaner_list.append((map_array, DT.SPRECISION(0.0)))
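A hedged sketch of how the three lists registered by add_product might be driven each timestep, judging from the coroutine protocol in the other examples: blocks flow through a gridding coroutine (like the grid_function shown in later examples) that fans out to the ops_list operators, each reduce coroutine then folds its temporary map_array into the persistent reduce_array, and the cleaner list resets the temporaries. run_timestep and gridder are illustrative names, not part of pystoch:

def run_timestep(blocks, gridder, reduce_list, cleaner_list):
    # map step: the gridding coroutine sends each block on to the
    # operators, which accumulate into their map arrays
    for block_and_metadata in blocks:
        gridder.send(block_and_metadata)

    # reduce step: fold each temporary map_array into its reduce_array
    for coroutine, map_array in reduce_list:
        coroutine.send(map_array)

    # cleanup step: reset the temporaries for the next timestep
    for map_array, reset_value in cleaner_list:
        map_array[...] = reset_value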
Code example #4
def add_product(ops_list, reduce_list, cleaner_list, grid_data,
                product_metadata):

    product_type = product_metadata[PRODUCT_TYPE]

    config = get_config()
    # This method now uses a quadratic index space
    cell_depth_range = config[
        product_type].products.max_concentration.cell_depth_range

    # allocate space for the gridded result

    metadata = {'units': 'g/cm^3', 'long_name': 'Maximum Oil Concentration'}
    reduce_array = grid_data.allocate(product_name,
                                      DT.SPRECISION,
                                      metadata=metadata)

    # Calculate cell area:
    cell_area = grid_data._grid.cell_area  # m**2

    # Make a temporary array that will be used in the operation
    map_array = reduce_array.copy()
    coroutine = max_concentration_op(map_array, cell_depth_range, cell_area)

    # Append the operation coroutine to the list
    ops_list.append(coroutine)

    coroutine = max_reduce(reduce_array)

    reduce_list.append((coroutine, map_array))

    # Now add a reset value for the map array
    cleaner_list.append((map_array, DT.SPRECISION(0.0)))
Code example #5
    def make_particles(self, iteration=0):

        particles = numpy.zeros((self.blocksize, ), dtype=DT.IDEAL_PARTICLE)
        # Grrr - no way to pass the buffer to put random numbers in ?

        particles['loc'] = util.random_sample(particles['loc'].shape,
                                              dtype=DT.PRECISION)
        particles['prev_loc'] = util.random_sample(particles['loc'].shape,
                                                   dtype=DT.PRECISION)

        # Cheat and make time a function of block number
        particles['lifetime'][:] = DT.SPRECISION(iteration)

        mass_offset = DT.SPRECISION(0.9)
        mass_scale = DT.SPRECISION(.1)
        mass_time_param = DT.SPRECISION(1) / numpy.log10(
            DT.SPRECISION(iteration + 2))
        # mass = 1/log10(i+2) * rand[0.9, 1.0)
        particles['mass'] = (
            mass_time_param *
            (util.random_sample(particles['mass'].shape) * mass_scale +
             mass_offset))

        dens_offset = DT.SPRECISION(0.9)
        dens_scale = DT.SPRECISION(.1)
        # density = rand[0.9, 1.0)
        particles['density'] = (
            numpy.random.rand(*particles['density'].shape) * dens_scale +
            dens_offset)

        return particles
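The fields accessed above imply a structured particle dtype along these lines; the shapes and precisions here are assumptions (the real definition lives in pystoch.datatypes):

import numpy

# hypothetical reconstruction of DT.IDEAL_PARTICLE for a 2-D run
IDEAL_PARTICLE = numpy.dtype([
    ('loc', numpy.float64, (2, )),       # current position
    ('prev_loc', numpy.float64, (2, )),  # position at the previous timestep
    ('lifetime', numpy.float32),         # elapsed time for the particle
    ('mass', numpy.float32),
    ('density', numpy.float32),
])

particles = numpy.zeros((100, ), dtype=IDEAL_PARTICLE)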
Code example #6
def add_product(ops_list, reduce_list, cleaner_list, grid_data,
                product_metadata):

    product_type = product_metadata[PRODUCT_TYPE]

    # allocate space for the gridded result

    metadata = {
        'units': 'm',
        'long_name': 'Maximum of spillet thickness',
        'fill_value': DT.SPRECISION(0.0)
    }
    reduce_array = grid_data.allocate(product_name,
                                      DT.SPRECISION,
                                      metadata=metadata)

    # Calculate cell area:
    cell_area = grid_data._grid.cell_area

    # Make a temporary array that will be used in the operation
    map_array = reduce_array.copy()

    #config = get_config()
    #advective_dispersion = config[product_type].products.max_of_spillet_thickness.get('advective_dispersion',numpy.nan)
    #spillet_dispersion = advective_dispersion * 0.2 # m^2/s
    #coroutine = thickest_spillet_op(map_array, spillet_dispersion)

    coroutine = thickest_spillet_op(map_array)

    # Append the operation coroutine to the list
    ops_list.append(coroutine)

    coroutine = max_reduce(reduce_array)

    reduce_list.append((coroutine, map_array))

    # Now add a reset value for the map array
    cleaner_list.append((map_array, DT.SPRECISION(0.0)))
Code example #7
    def get_extents(self, ndims, spillets=None):

        result = DT.extents(ndims=ndims, prec=DT.SPRECISION)

        pmin = result['ll'][0]
        pmax = result['ur'][0]

        pmax[0] = numpy.max(self._record_data["UR Bound"]["Lon"], axis=0)
        pmax[1] = numpy.max(self._record_data["UR Bound"]["Lat"], axis=0)

        pmin[0] = numpy.min(self._record_data["LL Bound"]["Lon"], axis=0)
        pmin[1] = numpy.min(self._record_data["LL Bound"]["Lat"], axis=0)

        # Add a little margin to the actual value...
        result['ll'][0] = pmin - 0.001 * abs(pmin)
        result['ur'][0] = pmax + 0.001 * abs(pmax)

        return result
Code example #8
def main():
    logger.info('Starting main program')

    # Get system command line arguments
    options = get_command_line_arguments()

    if options.cfgpath is not None:
        config = Config(options.cfgpath)
    else:
        for loc in (os.curdir,
                    os.path.expanduser(os.path.join('~', '.pystoch')),
                    os.environ.get("PYSTOCH_CONF_DIR", '')):

            cfg_file = os.path.join(loc, 'config.yaml')
            if os.path.exists(cfg_file):
                config = Config(cfg_file)
                break

    DT(ndims=2, precision=numpy.float32, location_units='LatLon')

    wf = WorkFlow()

    # Uses the Config file rather than passing arguments
    wf.select_grid_method()

    wf.select_trajectory_files(options.path, options.prefix, None)

    # Uses the Config file rather than passing arguments
    wf.setup_products()
    logger.info('Pystoch setup complete - running now...')

    wf.run()

    logger.info('Pystoch run complete - writing output')

    # mpi_msr presumably flags the MPI master rank, so only one rank writes
    if mpi_msr:
        for name, gd in wf._grid_data.iteritems():
            logger.info('Writing output for %s products' % name)

            netcdf_out(options.outfile + '_' + name + '.nc', options.prefix,
                       wf.nsims, name, gd)

    logger.info('Pystoch run complete')
Code example #9
def grid_function(grid, operators):

    index_position = numpy.zeros(
        0, dtype=DT.IVECTOR)  # The IJ location of the particle

    while True:
        (block, metadata) = (yield)  # an array of particle datatype

        blen = len(block)

        particle_position = block['loc']

        if index_position.shape != particle_position.shape:
            index_position = numpy.zeros(particle_position.shape, numpy.int32)
        else:
            index_position[:] = 0

        grid.indexof(particle_position, out=index_position)

        for target in operators:
            # send: Particles, index_position, weights and metadata for each cell
            target.send((block, index_position, DT.PRECISION(1.0), metadata))
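grid_function is a generator-based coroutine, so it has to be advanced to its first (yield) before anything can be sent into it. A minimal hookup sketch; grid, operators, particle_block and metadata are assumed to already exist:

pipeline = grid_function(grid, operators)
next(pipeline)  # prime the coroutine: advance to the first (yield)
pipeline.send((particle_block, metadata))  # deliver one block of particles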
Code example #10
def add_product(ops_list, reduce_list, cleaner_list, grid_data,
                product_metadata):

    product_type = product_metadata[PRODUCT_TYPE]

    # allocate space for the gridded result
    metadata = {'units': 'm^3', 'long_name': 'Aggregate Oil Volume'}
    reduce_array = grid_data.allocate(product_name,
                                      DT.SPRECISION,
                                      metadata=metadata)

    # Make a temporary array that will be used in the operation
    map_array = reduce_array.copy()
    coroutine = oil_volume_op(map_array)

    # Append the operation coroutine to the list
    ops_list.append(coroutine)

    coroutine = max_reduce(reduce_array)

    reduce_list.append((coroutine, map_array))

    # Now add a reset value for the map array
    cleaner_list.append((map_array, DT.SPRECISION(0.0)))
Code example #11
try:
    import arcinfo
except ImportError:
    try:
        import arceditor
    except ImportError:
        pass

import arcpy

import sys, os, struct, numpy
import datetime

from pystoch.readers.oilmdl_om_reader import OilModelDirectAccessOMReader as OMDAR
from pystoch.datatypes import DT

DT(ndims=2, precision=numpy.float32, location_units='LatLon')
sr = arcpy.SpatialReference(
    os.path.join(
        arcpy.GetInstallInfo()["InstallDir"],
        r"Coordinate Systems/Geographic Coordinate Systems/World/WGS 1984.prj")
)


def ReadScenarioFile(sScenarioFile):
    sGeoLocPath = os.path.split(os.path.split(sScenarioFile)[0])[0]

    with open(sScenarioFile, "r") as oScenFile:
        scenLines = oScenFile.readlines()

    for i in range(len(scenLines)):
        key = scenLines[i].split("=")[0]
Code example #12
import numpy

import matplotlib
import matplotlib.pyplot as plt

from pystoch import util
from pystoch.datatypes import DT, Singleton
from pystoch.grids import Grid
from pystoch.grid_data import GridData
from pystoch.coroutines import *
from pystoch.readers.oilmdl_reader import OilModelDirectAccessReader

import logging
logger = logging.getLogger('pystoch.scripts.oilmdl_oil')

nSims = 12
fbase = './trajectory_data/oilmap/pystoch_test_case/PYSTOCHTESTCASE_s{0:03}'
DT(ndims=2, precision=numpy.float64, location_units='LatLon')

extents = numpy.ones(1, DT.EXTENTS)
extent_calc = extents_coroutine(extents)

# make the data streamer
streamer = stream_coroutine(target=extent_calc)

for i in xrange(nSims):
    fname = fbase.format(i + 1)
    logger.info('Reading from file: "%s"' % fname)
    file_reader = OilModelDirectAccessReader(fname)
    streamer.send(file_reader)

logger.info('Got Extents: %s' % extents)
Code example #13
    def setUp(self):
        """
        Setup test
        """
        DT(ndims=2, precision=numpy.float32, location_units='LatLon')
Code example #14
def grid_function(grid, operators):

    # Dummy allocations - will be reallocated as needed.
    index_position = numpy.zeros(
        0, dtype=DT.IVECTOR)  # The IJ location of the particle
    prev_index_position = numpy.zeros(
        0, dtype=DT.IVECTOR
    )  # The IJ location of the particle at the previous timestep
    index_diff = numpy.zeros(
        0, dtype=DT.IVECTOR)  # The IJ location change between timesteps
    index_sum = numpy.zeros(
        0, dtype=DT.IVECTOR)  # The sum of the IJ location change

    nsamples_per_grid = 3

    # Temporary hack:
    block_number = 0

    while True:
        (block, metadata) = (yield)  # an array of particle datatype

        block_number += 1

        blen = len(block)

        particle_position = block['loc']
        prev_particle_position = block['prev_loc']

        # the number of dimensions in a position vector
        ndims = particle_position.shape[1]

        if index_position.shape != particle_position.shape:
            index_position = numpy.zeros(particle_position.shape, numpy.int32)
        else:
            index_position[:] = 0

        if prev_index_position.shape != prev_particle_position.shape:
            prev_index_position = numpy.zeros(prev_particle_position.shape,
                                              numpy.int32)
        else:
            prev_index_position[:] = 0

        if index_diff.shape != prev_particle_position.shape:
            index_diff = numpy.zeros(prev_particle_position.shape, numpy.int32)
        else:
            index_diff[:] = 0

        if index_sum.shape != (blen, ):
            index_sum = numpy.zeros(blen, numpy.int32)
        else:
            index_sum[:] = 0

        # Calculate the IJ index of each particle now and previously
        grid.indexof(particle_position, out=index_position)
        grid.indexof(prev_particle_position, out=prev_index_position)

        # evaluate the absolute value of the index space difference
        index_diff[...] = numpy.abs(index_position - prev_index_position)

        index_sum[...] = numpy.sum(index_diff, axis=1)

        for i in xrange(blen):

            if index_sum[i] > 0:

                delta_pos = (particle_position[i, :] -
                             prev_particle_position[i, :])
                samples = index_sum[i] * nsamples_per_grid

                interpolated_positions = (
                    delta_pos.reshape(1, ndims) *
                    numpy.arange(samples).reshape(samples, 1) /
                    DT.PRECISION(samples))

                interpolated_positions += prev_particle_position[i, :].reshape(
                    1, ndims)

                interpolated_index_positions = grid.indexof(
                    interpolated_positions)

                # Make the weight array the correct size
                weight = DT.PRECISION(1.0 / samples) * numpy.ones((samples, ))
                for target in operators:
                    target.send((block[i], interpolated_index_positions,
                                 weight, metadata))

            else:
                # Make the weight array the correct size
                weight = numpy.ones((1, ))
                for target in operators:
                    target.send(
                        (block[i], prev_index_position[i, :].reshape(1, ndims),
                         weight, metadata))
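A small numeric illustration of the sub-sampling step above, with made-up values: a particle that crossed index_sum = 3 cell boundaries gets samples = 9 interpolated positions along its displacement, each weighted 1/9 so its total contribution is conserved:

import numpy

prev = numpy.array([10.0, 20.0])  # previous position (hypothetical)
curr = numpy.array([10.4, 20.1])  # current position (hypothetical)
index_sum = 3                     # |di| + |dj| cells crossed in index space
samples = index_sum * 3           # nsamples_per_grid = 3, as above

# positions spaced along the displacement, starting at the previous location
positions = prev + ((curr - prev) *
                    numpy.arange(samples).reshape(samples, 1) / float(samples))
weights = numpy.ones((samples, )) / samples  # weights sum to 1.0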
Code example #15
    def _load_meta(self):

        record_data = None
        header_data = None

        version_number = None

        with open('.'.join([self.fname, self.RECORD_FILE_SUFFIX]),
                  'rb') as pcl_file:

            # Read the header record
            header_data = numpy.fromfile(
                pcl_file, dtype=self.SPLMDL_PCL_HEADER_RECORD_TYPE, count=1)

            # Read the rest of the entries
            record_data = numpy.fromfile(
                pcl_file, dtype=self.SPLMDL_PCL_LOOKUP_RECORD_TYPE, count=-1)

        if header_data is None or len(header_data) == 0:
            raise PyStochIOError(
                "Could not read the header from the file: '%s'" %
                '.'.join([self.fname, self.RECORD_FILE_SUFFIX]))

        version_number = header_data[0]['pcd_version']
        if version_number not in self.supported_versions:
            raise NotImplementedError('Cannot read pcd file version "%d"' %
                                      version_number)

        self._version_number = version_number

        if record_data is None:
            raise PyStochIOError(
                "Could not read data from PCL file:'%s'" %
                '.'.join([self.fname, self.RECORD_FILE_SUFFIX]))

        if len(record_data) == 0:
            raise PyStochIOError(
                "PCL file '%s' is empty - no records!" %
                '.'.join([self.fname, self.RECORD_FILE_SUFFIX]))

        ### CONVERT FROM FORTRAN TO C INDEXING! ###
        record_data['record_start'] -= 1

        self._record_data = record_data

        npseudo = header_data[0]['npseudo']

        self.set_record_size(version_number, npseudo)

        #logger.info('record_header: \n%s' % header_data)
        #logger.info('record_lookup: \n%s' % record_data)
        if version_number == -11:
            lookup_size = 19 * 4
            self._SPLMDL_PCD_LOOKUP_RECORD_TYPE = numpy.dtype([
                ('surface_records', numpy.int32, 1),
                ('subsurface_dissolved_records', numpy.int32, 1),
                ('subsurface_residual_records', numpy.int32, 1),
                ('tarball_records', numpy.int32, 1),
                ('sediment_records', numpy.int32, 1),
                ('shoreline_records', numpy.int32, 1),
                ('wave_height', DT.SPRECISION, 1),
                ('wind_average', DT.SPRECISION, 1),
                ('velocity_max', DT.SPRECISION, 1),
                ('surface_extent', DT.extent_type(ndims=2, prec=DT.SPRECISION),
                 1),  # Must reorder from: XMIN, XMAX, YMIN, YMAX
                ('subsurface_extent',
                 DT.extent_type(ndims=3, prec=DT.SPRECISION),
                 1),  # Must reorder from: XMIN, XMAX, YMIN, YMAX, ZMIN, ZMAX
                ### Unknown number of pseudo components - must be defined when the header is read!
                ('junk', numpy.character, self.PCD_RECORD_SIZE - lookup_size),
            ])
        elif version_number == -10:
            lookup_size = 18 * 4
            self._SPLMDL_PCD_LOOKUP_RECORD_TYPE = numpy.dtype([
                ('surface_records', numpy.int32, 1),
                ('subsurface_dissolved_records', numpy.int32, 1),
                ('subsurface_residual_records', numpy.int32, 1),
                ('sediment_records', numpy.int32, 1),
                ('shoreline_records', numpy.int32, 1),
                ('wave_height', DT.SPRECISION, 1),
                ('wind_average', DT.SPRECISION, 1),
                ('velocity_max', DT.SPRECISION, 1),
                ('surface_extent', DT.extent_type(ndims=2, prec=DT.SPRECISION),
                 1),  # Must reorder from: XMIN, XMAX, YMIN, YMAX
                ('subsurface_extent',
                 DT.extent_type(ndims=3, prec=DT.SPRECISION),
                 1),  # Must reorder from: XMIN, XMAX, YMIN, YMAX, ZMIN, ZMAX
                ### Unknown number of pseudo components - must be defined when the header is read!
                ('junk', numpy.character, self.PCD_RECORD_SIZE - lookup_size),
            ])
        else:  # -8 or -9
            lookup_size = 8 * 4
            self._SPLMDL_PCD_LOOKUP_RECORD_TYPE = numpy.dtype([
                ('surface_records', numpy.int32, 1),
                ('subsurface_dissolved_records', numpy.int32, 1),
                ('subsurface_residual_records', numpy.int32, 1),
                ('sediment_records', numpy.int32, 1),
                ('shoreline_records', numpy.int32, 1),
                ('wave_height', DT.SPRECISION, 1),
                ('wind_average', DT.SPRECISION, 1),
                ('velocity_max', DT.SPRECISION, 1),
                ### Unknown number of pseudo components - must be defined when the header is read!
                ('junk', numpy.character, self.PCD_RECORD_SIZE - lookup_size),
            ])

        # particle_type is 0
        SPLMDL_SURFACE_PARTICLE = self.create_surface_spillet_type(
            version_number, npseudo)

        # particle_type is -1
        SPLMDL_SUBSURFACE_DISSOLVED_PARTICLE = self.create_subsurface_dissolved_spillet_type(
            version_number, npseudo)

        # particle_type is -2
        SPLMDL_SUBSURFACE_RESIDUAL_PARTICLE = self.create_subsurface_residual_spillet_type(
            version_number, npseudo)

        # particle_type is -3
        SPLMDL_TARBALL_PARTICLE = self.create_tarball_spillet_type(
            version_number, npseudo)

        # particle_type is -12
        SPLMDL_SEDIMENT_PARTICLE = self.create_sediment_spillet_type(
            version_number, npseudo)

        SPLMDL_SHORELINE_PARTICLE = self.create_shoreline_spillet_type(
            version_number, npseudo)

        self.spillet_types = {
            SplModelDirectAccessReader.SURFACE_SPILLETS: {
                'dtype': SPLMDL_SURFACE_PARTICLE,
                'offset': None,
                'count': None
            },
            SplModelDirectAccessReader.SHORELINE_SPILLETS: {
                'dtype': SPLMDL_SHORELINE_PARTICLE,
                'offset': None,
                'count': None
            },
            SplModelDirectAccessReader.SUBSURFACE_DISSOLVED_SPILLETS: {
                'dtype': SPLMDL_SUBSURFACE_DISSOLVED_PARTICLE,
                'offset': None,
                'count': None
            },
            SplModelDirectAccessReader.SUBSURFACE_RESIDUAL_SPILLETS: {
                'dtype': SPLMDL_SUBSURFACE_RESIDUAL_PARTICLE,
                'offset': None,
                'count': None
            },
            SplModelDirectAccessReader.TARBALL_SPILLETS: {
                'dtype': SPLMDL_TARBALL_PARTICLE,
                'offset': None,
                'count': None
            },
            SplModelDirectAccessReader.SEDIMENT_SPILLETS: {
                'dtype': SPLMDL_SEDIMENT_PARTICLE,
                'offset': None,
                'count': None
            },
        }

        if version_number <= -11 and SplModelDirectAccessReader.TARBALL_SPILLETS not in self.product_spillet_map[
                SURFACE]:
            # Modify the class variable based on whether this is version 11 or not...
            # Not a great way to deal with this. Better ideas?
            self.product_spillet_map[SURFACE].append(
                SplModelDirectAccessReader.TARBALL_SPILLETS)

            self.product_spillet_map[SUBSURFACE].append(
                SplModelDirectAccessReader.TARBALL_SPILLETS)
Code example #16
    def setUp(self):
        """
        Setup test
        """
        Singleton._instances.clear()
        DT(ndims=2, precision=numpy.float32, location_units='LatLon')
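Code examples #13 and #16 differ in one line: #16 clears Singleton._instances before re-initializing DT, which matters if DT caches one instance per class in a class-level dict, as the Singleton import in code example #12 suggests. A minimal sketch of that pattern, as an assumption rather than the actual pystoch.datatypes implementation:

class Singleton(type):
    """Metaclass that caches one instance per class in a shared dict."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # A repeated DT(...) call returns the cached instance, so tests
        # must clear the cache to re-initialize with new parameters.
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton,
                                        cls).__call__(*args, **kwargs)
        return cls._instances[cls]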