Example 1
class InitRiseVelFromDist(InitBaseClass, Serializable):
    _state = copy.deepcopy(InitBaseClass._state)

    def __init__(self, distribution=None, **kwargs):
        """
        Set the rise velocity parameters to be sampled from a distribution.

        Use distribution to define rise_vel

        :param distribution: An object capable of generating a probability
                             distribution. Currently available:
                               - UniformDistribution
                               - NormalDistribution
                               - LogNormalDistribution
                               - WeibullDistribution
                             New distribution classes can be added; the only
                             requirement is a set_values() method that accepts
                             (and presumably modifies in place) a NumPy array.
        :type distribution: a distribution object (one of the classes above,
                            or a compatible class)
        """
        super(InitRiseVelFromDist, self).__init__(**kwargs)

        if distribution:
            self.dist = distribution
        else:
            self.dist = UniformDistribution()

    def initialize(self, num_new_particles, spill, data_arrays,
                   substance=None):
        'Update values of "rise_vel" data array for new particles'
        self.dist.set_values(data_arrays['rise_vel'][-num_new_particles:])
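
The docstring above only requires that a distribution object expose a set_values() method which fills a NumPy array in place. A minimal sketch of a custom class satisfying that contract; the class name and the constant value are illustrative and not part of GNOME:

import numpy as np

class ConstantDistribution(object):
    """Hypothetical distribution: fills the array with a single value."""

    def __init__(self, value=0.001):
        self.value = value

    def set_values(self, np_array):
        # modify the passed-in array in place, as the initializer expects
        np_array[:] = self.value

# it plugs into the initializer the same way as the built-in classes:
# init = InitRiseVelFromDist(distribution=ConstantDistribution(0.002))
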
Example 2
    def __init__(self, distribution=None, **kwargs):
        """
        Set the rise velocity parameters to be sampled from a distribution.

        Use distribution to define rise_vel

        :param distribution: An object capable of generating a probability
                             distribution. Currently available:
                               - UniformDistribution
                               - NormalDistribution
                               - LogNormalDistribution
                               - WeibullDistribution
                             New distribution classes can be added; the only
                             requirement is a set_values() method that accepts
                             (and presumably modifies in place) a NumPy array.
        :type distribution: a distribution object (one of the classes above,
                            or a compatible class)
        """
        super(InitRiseVelFromDist, self).__init__(**kwargs)

        if distribution:
            self.dist = distribution
        else:
            self.dist = UniformDistribution()
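
Since this constructor is the same as in Example 1, the detail worth illustrating is the default: if no distribution is passed, the initializer falls back to UniformDistribution(). A brief usage sketch (variable names are illustrative; the NormalDistribution parameters are taken from the tests further below):

init_default = InitRiseVelFromDist()   # falls back to UniformDistribution()
init_normal = InitRiseVelFromDist(
    distribution=NormalDistribution(mean=0, sigma=0.1))
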
Example 3
    def __init__(self, distribution=None,
                 water_density=1020.0, water_viscosity=1.0e-6,
                 **kwargs):
        """
        Set the droplet size from a distribution. Use the C++ get_rise_velocity
        function exposed via cython (rise_velocity_from_drop_size) to obtain
        the rise velocity from the droplet size. Even though the droplet size
        does not change over time, it is still stored in a data array
        ('droplet_diameter'), as it can be useful for post-processing.

        Use distribution to define rise_vel

        :param distribution: An object capable of generating a probability
                             distribution. Currently available:
                               - UniformDistribution
                               - NormalDistribution
                               - LogNormalDistribution
                               - WeibullDistribution
                             New distribution classes can be added; the only
                             requirement is a set_values() method that accepts
                             (and presumably modifies in place) a NumPy array.
        :type distribution: a distribution object (one of the classes above,
                            or a compatible class)

        :param water_density: water density, default 1020.0 [kg/m^3]
        :type water_density: float
        :param water_viscosity: kinematic viscosity of water,
                                default 1.0e-6 [m^2/s]
        :type water_viscosity: float
        """
        super(InitRiseVelFromDropletSizeFromDist, self).__init__(**kwargs)

        if distribution:
            self.distribution = distribution
        else:
            self.distribution = UniformDistribution()

        self.water_viscosity = water_viscosity
        self.water_density = water_density
        self.array_types.update({'rise_vel': array_types.rise_vel,
                            'droplet_diameter': array_types.droplet_diameter})
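
The docstring above says the rise velocity is computed from the droplet diameter, the LE (oil) density, and the water density and kinematic viscosity. For small droplets the classic Stokes-law estimate captures that dependence; the sketch below is only a physical illustration and is not necessarily the formulation used by the cython-wrapped rise_velocity_from_drop_size routine:

import numpy as np

def stokes_rise_velocity(drop_diameter, le_density,
                         water_viscosity=1.0e-6, water_density=1020.0):
    """
    Stokes-law rise velocity [m/s] of small buoyant droplets.

    drop_diameter:   droplet diameter [m]
    le_density:      oil (LE) density [kg/m^3]
    water_viscosity: kinematic viscosity of water [m^2/s]
    water_density:   water density [kg/m^3]
    """
    g = 9.81  # gravitational acceleration [m/s^2]
    delta_rho = water_density - le_density
    return (g * delta_rho * np.asarray(drop_diameter) ** 2 /
            (18.0 * water_viscosity * water_density))
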
Example 4
class InitRiseVelFromDropletSizeFromDist(InitBaseClass, Serializable):
    _state = copy.deepcopy(InitBaseClass._state)

    def __init__(self, distribution=None,
                 water_density=1020.0, water_viscosity=1.0e-6,
                 **kwargs):
        """
        Set the droplet size from a distribution. Use the C++ get_rise_velocity
        function exposed via cython (rise_velocity_from_drop_size) to obtain
        the rise velocity from the droplet size. Even though the droplet size
        does not change over time, it is still stored in a data array
        ('droplet_diameter'), as it can be useful for post-processing.

        Use distribution to define rise_vel

        :param distribution: An object capable of generating a probability
                             distribution. Currently available:
                               - UniformDistribution
                               - NormalDistribution
                               - LogNormalDistribution
                               - WeibullDistribution
                             New distribution classes can be added; the only
                             requirement is a set_values() method that accepts
                             (and presumably modifies in place) a NumPy array.
        :type distribution: a distribution object (one of the classes above,
                            or a compatible class)

        :param water_density: water density, default 1020.0 [kg/m^3]
        :type water_density: float
        :param water_viscosity: kinematic viscosity of water,
                                default 1.0e-6 [m^2/s]
        :type water_viscosity: float
        """
        super(InitRiseVelFromDropletSizeFromDist, self).__init__(**kwargs)

        if distribution:
            self.dist = distribution
        else:
            self.dist = UniformDistribution()

        self.water_viscosity = water_viscosity
        self.water_density = water_density

    def initialize(self, num_new_particles, spill, data_arrays, substance):
        """
        Update values of 'rise_vel' and 'droplet_diameter' data arrays for
        new particles. First create a droplet_size array sampled from specified
        distribution, then use the cython wrapped (C++) function to set the
        'rise_vel' based on droplet size and properties like LE_density,
        water density and water_viscosity:
        gnome.cy_gnome.cy_rise_velocity_mover.rise_velocity_from_drop_size()
        """
        drop_size = np.zeros((num_new_particles, ), dtype=np.float64)
        le_density = np.zeros((num_new_particles, ), dtype=np.float64)

        self.dist.set_values(drop_size)

        data_arrays['droplet_diameter'][-num_new_particles:] = drop_size
        le_density[:] = substance.density

        # now update rise_vel with droplet size - dummy for now
        rise_velocity_from_drop_size(
                                data_arrays['rise_vel'][-num_new_particles:],
                                le_density, drop_size,
                                self.water_viscosity, self.water_density)
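
A sketch of how this initializer's initialize() could be exercised outside a full Model run, using plain NumPy arrays in place of the spill container's data arrays. The MockSubstance stand-in, the variable names, and the 200-micron uniform droplet distribution are assumptions for illustration only:

import numpy as np

class MockSubstance(object):
    """Illustrative stand-in exposing only the attribute initialize() reads."""
    density = 900.0  # oil (LE) density [kg/m^3]

num_new = 10
data_arrays = {'rise_vel': np.zeros((num_new,), dtype=np.float64),
               'droplet_diameter': np.zeros((num_new,), dtype=np.float64)}

init = InitRiseVelFromDropletSizeFromDist(
    distribution=UniformDistribution(low=.0002, high=.0002))  # 200 micron droplets
init.initialize(num_new, None, data_arrays, MockSubstance())

# 'droplet_diameter' now holds the sampled sizes and 'rise_vel' the
# velocities computed by rise_velocity_from_drop_size()
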
Example 5
def test_ordered_collection_api():
    release_time = datetime(2012, 1, 1, 12)
    start_position = (23.0, -78.5, 0.0)
    num_elements = 100

    sc = SpillContainer()
    sc.spills += point_line_release_spill(num_elements, start_position,
                                          release_time)
    assert len(sc.spills) == 1


""" tests w/ element types set for two spills """
el0 = ElementType([
    InitWindages((0.02, 0.02), -1),
    InitRiseVelFromDist(distribution=UniformDistribution(low=1, high=10))
],
                  substance=test_oil)

el1 = ElementType(
    [InitWindages(),
     InitRiseVelFromDist(distribution=UniformDistribution())],
    substance=test_oil)

arr_types = {'windages', 'windage_range', 'windage_persist', 'rise_vel'}


@pytest.mark.parametrize(("elem_type", "arr_types"), [((el0, el1), arr_types)])
def test_element_types(elem_type, arr_types, sample_sc_no_uncertainty):
    """
    Tests that the spill_container's data_arrays associated with initializers
Example 6
rise_vel_array = mock_sc_array_types(['rise_vel'])
rise_vel_diameter_array = mock_sc_array_types(['rise_vel', 'droplet_diameter'])

num_elems = 10
oil = test_oil


def assert_dataarray_shape_size(arr_types, data_arrays, num_released):
    for key, val in arr_types.items():
        assert data_arrays[key].dtype == val.dtype
        assert data_arrays[key].shape == (num_released, ) + val.shape


""" Initializers - following are used for parameterizing tests """
fcn_list = (
    InitWindages(), InitRiseVelFromDist(distribution=UniformDistribution()),
    InitRiseVelFromDist(distribution=NormalDistribution(mean=0, sigma=0.1)),
    InitRiseVelFromDist(distribution=LogNormalDistribution(mean=0, sigma=0.1)),
    InitRiseVelFromDist(distribution=WeibullDistribution(
        alpha=1.8, lambda_=(1 / (.693**(1 / 1.8))))),
    InitRiseVelFromDropletSizeFromDist(NormalDistribution(mean=0, sigma=0.1)))

arrays_ = (windages, rise_vel_array, rise_vel_array, rise_vel_array,
           rise_vel_array, rise_vel_diameter_array)

spill_list = (None, None, None, None, None, Spill(Release(datetime.now())))


@pytest.mark.parametrize(("fcn", "arr_types", "spill"),
                         zip(fcn_list, arrays_, spill_list))
def test_correct_particles_set_by_initializers(fcn, arr_types, spill):

Example 7
def test_props():
    """
    test properties can be set
    """

    r = RiseVelocityMover()
    #r.water_density = 1
    #r.water_viscosity = 1.1e-6

    #assert r.water_density == 1
    #assert r.water_viscosity == 1.1e-6


time_step = 15 * 60  # seconds
rel_time = datetime(2012, 8, 20, 13)  # yyyy/month/day/hr/min/sec
initializers = [InitRiseVelFromDist(distribution=UniformDistribution())]
sc = sample_sc_release(
    5, (3., 6., 0.),
    rel_time,
    uncertain=False,
    arr_types={'rise_vel': gat('rise_vel')},
    substance=NonWeatheringSubstance(initializers=initializers))
initializers = [InitRiseVelFromDist(distribution=UniformDistribution())]
u_sc = sample_sc_release(
    5, (3., 6., 0.),
    rel_time,
    uncertain=True,
    arr_types={'rise_vel': gat('rise_vel')},
    substance=NonWeatheringSubstance(initializers=initializers))
model_time = rel_time

Example 8
def make_model(images_dir=os.path.join(base_dir, 'images')):

    print('get contiguous')

    kml_file = os.path.join(base_dir, 'contigua.kml')
    with open(kml_file) as f:
        contiguous = parser.parse(f).getroot().Document

    coordinates = contiguous.Placemark.LineString.coordinates.text.split(' ')
    cont_coord = []
    for x in coordinates:
        x = x.split(',')
        if len(x) > 1 and float(x[1]) > -11.5 and float(x[1]) < -8.5:
            cont_coord.append([float(x[0]), float(x[1])])

    print('initializing the model')

    start_time = datetime(2020, 9, 15, 12, 0)
    mapfile = get_datafile(os.path.join(base_dir, './alagoas-coast.BNA'))

    gnome_map = MapFromBNA(mapfile, refloat_halflife=6)  # hours

    duration = timedelta(days=1)
    timestep = timedelta(minutes=15)
    end_time = start_time + duration

    steps = duration.total_seconds() / timestep.total_seconds()

    print("Total step: %.4i " % (steps))

    # one hour timestep
    model = Model(start_time=start_time,
                  duration=duration,
                  time_step=timestep,
                  map=gnome_map,
                  uncertain=False,
                  cache_enabled=False)

    oil_name = 'GENERIC MEDIUM CRUDE'

    wd = UniformDistribution(low=.0002, high=.0002)

    subs = GnomeOil(oil_name, initializers=plume_initializers(distribution=wd))

    model.spills += point_line_release_spill(release_time=start_time,
                                             start_position=(-35.153, -8.999,
                                                             0.0),
                                             num_elements=1000,
                                             end_release_time=end_time,
                                             substance=subs,
                                             units='kg')
    model.spills += point_line_release_spill(release_time=start_time,
                                             start_position=(-35.176, -9.135,
                                                             0.0),
                                             num_elements=1000,
                                             end_release_time=end_time,
                                             substance=subs,
                                             units='kg')
    model.spills += point_line_release_spill(release_time=start_time,
                                             start_position=(-35.062, -9.112,
                                                             0.0),
                                             num_elements=1000,
                                             end_release_time=end_time,
                                             substance=subs,
                                             units='kg')
    model.spills += point_line_release_spill(release_time=start_time,
                                             start_position=(-34.994, -9.248,
                                                             0.0),
                                             num_elements=1000,
                                             end_release_time=end_time,
                                             substance=subs,
                                             units='kg')

    #for idx in range(0, len(cont_coord),2):
    #    model.spills += point_line_release_spill(num_elements=steps, start_position=(cont_coord[idx][0],cont_coord[idx][1], 0.0),
    #                                     release_time=start_time,
    #                                     end_release_time=start_time + duration,
    #                                     amount=steps,
    #                                     substance = subs,
    #                                    units='kg')

    print('adding outputters')

    renderer = Renderer(mapfile,
                        images_dir,
                        image_size=(900, 600),
                        output_timestep=timedelta(minutes=10),
                        draw_ontop='forecast')
    #set the viewport to zoom in on the map:
    #renderer.viewport = ((-37, -11), (-34, -8)) #alagoas
    renderer.viewport = ((-35.5, -9.5), (-34, -8.5))  #1/4 N alagoas
    model.outputters += renderer

    netcdf_file = os.path.join(base_dir, 'maceio.nc')
    scripting.remove_netcdf(netcdf_file)
    model.outputters += NetCDFOutput(netcdf_file,
                                     which_data='standard',
                                     surface_conc='kde')

    #shp_file = os.path.join(base_dir, 'surface_concentration')
    #scripting.remove_netcdf(shp_file + ".zip")
    #model.outputters += ShapeOutput(shp_file,
    #                                zip_output=False,
    #                                surface_conc="kde",
    #                                )

    print('adding movers:')

    print('adding a RandomMover:')
    model.movers += RandomMover(diffusion_coef=10000)

    print('adding a current mover:')

    # # this is HYCOM currents
    curr_file = get_datafile(os.path.join(base_dir, 'corrente15a28de09.nc'))
    model.movers += GridCurrentMover(curr_file, num_method='Euler')

    print('adding a grid wind mover:')
    wind_file = get_datafile(os.path.join(base_dir, 'vento15a28de09.nc'))
    #topology_file = get_datafile(os.path.join(base_dir, 'WindSpeedDirSubsetTop.dat'))
    #w_mover = GridWindMover(wind_file, topology_file)
    w_mover = GridWindMover(wind_file)
    w_mover.uncertain_speed_scale = 1
    w_mover.uncertain_angle_scale = 0.2  # default is .4
    w_mover.wind_scale = 2

    model.movers += w_mover

    print('adding outputters')

    renderer = Renderer(mapfile,
                        images_dir,
                        image_size=(900, 600),
                        output_timestep=timestep,
                        draw_ontop='forecast')
    #set the viewport to zoom in on the map:
    #renderer.viewport = ((-37, -11), (-34, -8)) #alagoas
    renderer.viewport = ((-35.5, -9.5), (-34, -8.5))  #1/4 N alagoas
    model.outputters += renderer

    netcdf_file = os.path.join(base_dir, 'maragogi.nc')
    scripting.remove_netcdf(netcdf_file)
    model.outputters += NetCDFOutput(netcdf_file,
                                     which_data='standard',
                                     surface_conc='kde')

    return model
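
The snippet does not show a driver for make_model(); the usual PyGNOME script pattern, sketched here as an assumption, builds the model and steps it to completion (make_images_dir() is assumed to be available from the imported scripting module):

if __name__ == "__main__":
    scripting.make_images_dir()   # create the images folder for the renderer
    model = make_model()
    print('running the model')
    for step in model:            # stepping the model drives the renderer and NetCDF output
        print(step)
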
Example 9
def make_model(images_dir=os.path.join(base_dir, 'images')):
    print('initializing the model')

    start_time = datetime(2012, 10, 25, 0, 1)
    # start_time = datetime(2015, 12, 18, 06, 01)

    # 1 day of data in file
    # 1/2 hr in seconds
    model = Model(start_time=start_time,
                  duration=timedelta(hours=2),
                  time_step=900)

    mapfile = get_datafile(os.path.join(base_dir, 'nyharbor.bna'))

    print('adding the map')
    '''TODO: sort out MapFromBna's map_bounds parameter...
    it does nothing right now, and the spill is out of bounds'''
    model.map = MapFromBNA(mapfile, refloat_halflife=0.0)  # hours

    # draw_ontop can be 'uncertain' or 'forecast'
    # 'forecast' LEs are in black, and 'uncertain' are in red
    # default is 'forecast' LEs draw on top
    renderer = Renderer(mapfile, images_dir, image_size=(1024, 768))
    #     renderer.viewport = ((-73.5, 40.5), (-73.1, 40.75))
    #     renderer.viewport = ((-122.9, 45.6), (-122.6, 46.0))

    print('adding outputters')
    model.outputters += renderer

    netcdf_file = os.path.join(base_dir, 'script_ny_plume.nc')
    scripting.remove_netcdf(netcdf_file)

    model.outputters += NetCDFOutput(netcdf_file, which_data='all')

    print('adding two spills')
    # Break the spill into two spills, first with the larger droplets
    # and second with the smaller droplets.
    # Split the total spill volume (100 m^3) to have most
    # in the larger droplet spill.
    # Smaller droplets start at a lower depth than larger

    end_time = start_time + model.duration
    #     wd = WeibullDistribution(alpha=1.8,
    #                              lambda_=.00456,
    #                              min_=.0002)  # 200 micron min
    #
    #     spill = subsurface_plume_spill(num_elements=10,
    #                                    start_position=(-74.15,
    #                                                    40.5,
    #                                                    7.2),
    #                                    release_time=start_time,
    #                                    distribution=wd,
    #                                    amount=90,  # default volume_units=m^3
    #                                    units='m^3',
    #                                    end_release_time=end_time,
    #                                    density=600)
    #
    #     model.spills += spill

    #     wd = WeibullDistribution(alpha=1.8,
    #                              lambda_=.00456,
    #                              max_=.0002)  # 200 micron max

    oil_name = 'ALASKA NORTH SLOPE (MIDDLE PIPELINE, 1997)'

    wd = UniformDistribution(low=.0002, high=.0002)

    spill = point_line_release_spill(
        num_elements=10,
        amount=90,
        units='m^3',
        start_position=(-74.15, 40.5, 7.2),
        release_time=start_time,
        substance=GnomeOil(oil_name,
                           initializers=plume_initializers(distribution=wd))
        #element_type=plume(distribution=wd,
        #substance_name='ALASKA NORTH SLOPE (MIDDLE PIPELINE, 1997)')
    )
    model.spills += spill

    print('adding a RandomMover:')
    model.movers += RandomMover(diffusion_coef=50000)

    print('adding a RiseVelocityMover:')
    model.movers += RiseVelocityMover()

    # print('adding a RandomMover3D:')  # the RandomMover3D below is currently disabled
    #     model.movers += RandomMover3D(vertical_diffusion_coef_above_ml=5,
    #                                         vertical_diffusion_coef_below_ml=.11,
    #                                         mixed_layer_depth=10)

    # the url is broken, update and include the following four lines
    #     url = ('http://geoport.whoi.edu/thredds/dodsC/clay/usgs/users/jcwarner/Projects/Sandy/triple_nest/00_dir_NYB05.ncml')
    #     gc = GridCurrent.from_netCDF(url)
    #     u_mover = PyCurrentMover(gc, default_num_method='RK2')
    #     model.movers += u_mover

    # print 'adding a wind mover:'

    # series = np.zeros((2, ), dtype=gnome.basic_types.datetime_value_2d)
    # series[0] = (start_time, (30, 90))
    # series[1] = (start_time + timedelta(hours=23), (30, 90))

    # wind = Wind(timeseries=series, units='knot')
    #
    # default is .4 radians
    # w_mover = gnome.movers.WindMover(wind, uncertain_angle_scale=0)
    #
    # model.movers += w_mover

    # print('adding a simple mover:')  # the SimpleMover below is currently disabled
    #     s_mover = SimpleMover(velocity=(0.0, -.3, 0.0))
    #     model.movers += s_mover

    return model
Example 10
def make_model(images_dir=os.path.join(base_dir, 'images2')):
    print('initializing the model')

    start_time = datetime(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]),
                          int(sys.argv[4]), int(sys.argv[5]))
    mapfile = get_datafile(os.path.join(base_dir, './brazil-coast.bna'))

    gnome_map = MapFromBNA(mapfile, refloat_halflife=6)  # hours

    # # the image output renderer
    # global renderer
    #duration = timedelta(minutes=5)
    #timestep = timedelta(minutes=5)
    duration = timedelta(minutes=5)
    timestep = timedelta(minutes=5)
    endtime = start_time + duration

    steps = duration.total_seconds() / timestep.total_seconds()

    print("Total step: %.4i " % (steps))

    model = Model(start_time=start_time,
                  duration=duration,
                  time_step=timestep,
                  map=gnome_map,
                  uncertain=False,
                  cache_enabled=False)

    oil_name = 'GENERIC MEDIUM CRUDE'

    wd = UniformDistribution(low=.0002, high=.0002)

    subs = GnomeOil(oil_name, initializers=plume_initializers(distribution=wd))

    #print 'adding a spill'
    #spill = point_line_release_spill(num_elements=122,
    #                                 start_position=(-35.14,
    #                                                 -9.40, 0.0),
    #                                 release_time=start_time)
    #model.spills += spill

    #spill2 = spatial_release_spill(-35.14,-9.40, 0.0, start_time)
    #model.spills += spill2

    #print 'load nc'
    #netcdf_file = os.path.join(base_dir, 'maceio.nc')
    #relnc = InitElemsFromFile(netcdf_file,release_time=start_time)
    #relnc = InitElemsFromFile(netcdf_file,index=5)
    #spillnc = Spill(release=relnc)
    #print spillnc.release.num_elements
    #print spillnc.release.name
    #print spillnc.substance
    #print relnc._init_data['age']
    #print relnc.release_time

    #model.spills += spillnc

    #model._load_spill_data()

    #for sc in model.spills.items():
    #    sc.prepare_for_model_run()

    #print(relnc.num_elements)
    #print(relnc.num_released)

    # add particles - it works
    print('adding particles')
    # Persistent oil spill along the contiguous-zone border
    if int(sys.argv[6]) == 1:
        release = release_from_splot_data(start_time, 'contiguous.txt')
        print("Adding new particles")
        model.spills += Spill(release=release, substance=subs)

    # Particles from the previous simulation step, if a step.txt file exists
    try:
        f = open('step.txt')
        f.close()
        release2 = release_from_splot_data(start_time, 'step.txt')
        model.spills += Spill(release=release2, substance=subs)
    except IOError:
        print('No previous step, using only contiguous.txt')

    #assert rel.num_elements == exp_num_elems
    #assert len(rel.start_position) == exp_num_elems
    #cumsum = np.cumsum(exp)
    #for ix in xrange(len(cumsum) - 1):
    #    assert np.all(rel.start_position[cumsum[ix]] ==
    #                  rel.start_position[cumsum[ix]:cumsum[ix + 1]])
    #assert np.all(rel.start_position[0] == rel.start_position[:cumsum[0]])

    #spnc = Spill(release=None)
    #spnc.release = relnc

    print('adding a RandomMover:')
    #model.movers += RandomMover(diffusion_coef=10000, uncertain_factor=2)
    model.movers += RandomMover(diffusion_coef=10000)

    print('adding a current mover:')

    # # this is HYCOM currents
    curr_file = get_datafile(os.path.join(base_dir, 'corrente15a28de09.nc'))
    model.movers += GridCurrentMover(curr_file, num_method='Euler')

    print('adding a grid wind mover:')
    wind_file = get_datafile(os.path.join(base_dir, 'vento15a28de09.nc'))
    #topology_file = get_datafile(os.path.join(base_dir, 'WindSpeedDirSubsetTop.dat'))
    #w_mover = GridWindMover(wind_file, topology_file)
    w_mover = GridWindMover(wind_file)
    w_mover.uncertain_speed_scale = 1
    w_mover.uncertain_angle_scale = 0.2  # default is .4
    w_mover.wind_scale = 2

    model.movers += w_mover

    print('adding outputters')

    renderer = Renderer(mapfile,
                        images_dir,
                        image_size=(900, 600),
                        output_timestep=timestep,
                        draw_ontop='forecast')
    #set the viewport to zoom in on the map:
    #renderer.viewport = ((-37, -11), (-34, -8)) #alagoas
    renderer.viewport = ((-55, -34), (-30, 5))  # wide view of the Brazilian coast
    model.outputters += renderer

    netcdf_file = os.path.join(base_dir, 'step.nc')
    scripting.remove_netcdf(netcdf_file)
    model.outputters += NetCDFOutput(netcdf_file,
                                     which_data='standard',
                                     surface_conc='kde')

    return model
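
This script reads its start time and a release flag from the command line: sys.argv[1:6] supply year, month, day, hour and minute, and sys.argv[6] controls whether the contiguous.txt release is added. A sketch of the expected invocation and driver; the script name is an assumption:

# Hypothetical invocation:
#   python step_model.py 2020 9 15 12 0 1
#   arguments: year month day hour minute add_contiguous_flag
if __name__ == "__main__":
    model = make_model()
    model.full_run()   # run all steps; output goes to the renderer and step.nc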