    def setUp(self):
        start_lat = 38
        start_lon = -76
        start_depth = -5
        temp_time = datetime.utcnow()
        self.start_time = datetime(temp_time.year, temp_time.month, temp_time.day, temp_time.hour)
        self.loc = Location4D(latitude=start_lat, longitude=start_lon, depth=start_depth, time=self.start_time)

        # Generate time, u, v, z as random values
        # 48 timesteps at an hour each = 2 days of running
        self.times = list(range(0, 172800, 3600))  # in seconds
        self.u = []
        self.v = []
        self.z = []
        for w in range(0, 48):
            self.z.append(random.gauss(0, 0.0001))  # gaussian in m/s
            self.u.append(abs(AsaRandom.random()))  # random function in m/s
            self.v.append(abs(AsaRandom.random()))  # random function in m/s

        self.particles = []
        # Create particles
        for i in range(0, 3):
            p = Particle()
            p.location = self.loc
            self.particles.append(p)

        # Create a transport instance with horiz and vert dispersions
        self.transport_model = Transport(horizDisp=0.05, vertDisp=0.00003)
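
The fixtures above would typically be consumed by a test that steps each particle through the transport model one hourly timestep at a time. The original test body is not shown here; the following is a minimal sketch of that idea, assuming the Transport.move signature and return dict documented in the move examples below (the test method name and the timedelta import are illustrative only).

    def test_move_particles_sketch(self):
        # Hypothetical test body: advance every particle through all 48 hourly steps.
        modelTimestep = 3600  # seconds, matching the spacing of self.times
        for p in self.particles:
            for i in range(len(self.times)):
                result = self.transport_model.move(p, self.u[i], self.v[i], self.z[i], modelTimestep)
                # Build the next Location4D from the returned dict and advance the clock.
                p.location = Location4D(latitude=result['latitude'],
                                        longitude=result['longitude'],
                                        depth=result['depth'],
                                        time=p.location.time + timedelta(seconds=modelTimestep))
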
Example #2
    def setUp(self):
        start_lat = 38
        start_lon = -76
        start_depth = -5
        temp_time = datetime.utcnow()
        self.start_time = datetime(temp_time.year, temp_time.month,
                                   temp_time.day, temp_time.hour)
        self.loc = Location4D(latitude=start_lat,
                              longitude=start_lon,
                              depth=start_depth,
                              time=self.start_time)

        # Generate time, u, v, z as random values
        # 48 timesteps at an hour each = 2 days of running
        self.times = range(0, 172800, 3600)  # in seconds
        self.u = []
        self.v = []
        self.z = []
        for w in xrange(0, 48):
            self.z.append(random.gauss(0, 0.0001))  # gaussian in m/s
            self.u.append(abs(AsaRandom.random()))  # random function in m/s
            self.v.append(abs(AsaRandom.random()))  # random function in m/s

        self.particles = []
        # Create particles
        for i in xrange(0, 3):
            p = Particle()
            p.location = self.loc
            self.particles.append(p)

        # Create a transport instance with horiz and vert dispersions
        self.transport_model = Transport(horizDisp=0.05, vertDisp=0.00003)
Example #3
    def move(self, particle, u, v, w, modelTimestep, **kwargs):
        """
        Returns the lat, lon, H, and velocity of a projected point given a starting
        lat and lon (dec deg), a depth (m) below sea surface (positive up), u, v, and w velocity components (m/s), a horizontal and vertical
        displacement coefficient (m^2/s) H (m), and a model timestep (s).

        GreatCircle calculations are done based on the Vincenty Direct method.

        Returns a dict like:
            {   'latitude': x, 
                'azimuth': x,
                'reverse_azimuth': x, 
                'longitude': x, 
                'depth': x, 
                'u': x
                'v': x, 
                'w': x, 
                'distance': x, 
                'angle': x, 
                'vertical_distance': x, 
                'vertical_angle': x }
        """

        logger.debug("U: %s, V: %s, W: %s" % (str(u), str(v), str(w)))

        # IMPORTANT:
        # If we got no data from the model, we use the last available value stored in the particle!
        if u is None or math.isnan(u):
            u = particle.last_u()
        if v is None or math.isnan(v):
            v = particle.last_v()
        if w is None or math.isnan(w):
            w = particle.last_w()

        particle.u_vector = u
        particle.v_vector = v
        particle.w_vector = w

        if particle.halted:
            u, v, w = 0, 0, 0
        else:
            u += AsaRandom.random() * ((2 * self._horizDisp / modelTimestep)**
                                       0.5)  # u transformation calculations
            v += AsaRandom.random() * ((2 * self._horizDisp / modelTimestep)**
                                       0.5)  # v transformation calculations
            w += AsaRandom.random() * ((2 * self._vertDisp / modelTimestep)**
                                       0.5)  # w transformation calculations

        result = AsaTransport.distance_from_location_using_u_v_w(
            u=u, v=v, w=w, timestep=modelTimestep, location=particle.location)
        result['u'] = u
        result['v'] = v
        result['w'] = w
        return result
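
The random-walk term above scales a random draw by sqrt(2*K/dt), where K is the horizontal (or vertical) dispersion coefficient and dt the model timestep; multiplied by the timestep, that velocity kick yields a displacement on the order of sqrt(2*K*dt), the standard diffusive step length. A small standalone sketch of that scaling (not library code):

import math

def random_walk_velocity_kick(K, dt, R):
    """Velocity perturbation (m/s) for a dispersion coefficient K (m^2/s),
    a timestep dt (s), and a zero-mean random draw R (dimensionless)."""
    return R * math.sqrt(2.0 * K / dt)

# With horizDisp = 0.05 m^2/s and a 3600 s timestep, the per-step displacement
# scale is sqrt(2 * 0.05 * 3600) ~= 19 m.
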
Example #4
    def move(self, particle, u, v, w, modelTimestep, **kwargs):
        """
        Returns the lat, lon, H, and velocity of a projected point given a starting
        lat and lon (dec deg), a depth (m) below sea surface (positive up), u, v, and w velocity components (m/s), a horizontal and vertical
        displacement coefficient (m^2/s) H (m), and a model timestep (s).

        GreatCircle calculations are done based on the Vincenty Direct method.

        Returns a dict like:
            {   'latitude': x, 
                'azimuth': x,
                'reverse_azimuth': x, 
                'longitude': x, 
                'depth': x, 
                'u': x
                'v': x, 
                'w': x, 
                'distance': x, 
                'angle': x, 
                'vertical_distance': x, 
                'vertical_angle': x }
        """

        logger.debug("U: %s, V: %s, W: %s" % (str(u),str(v),str(w)))

        # IMPORTANT:
        # If we got no data from the model, we use the last available value stored in the particle!
        if u is None or math.isnan(u):
            u = particle.last_u()
        if v is None or math.isnan(v):
            v = particle.last_v()
        if w is None or math.isnan(w):
            w = particle.last_w()

        particle.u_vector = u
        particle.v_vector = v
        particle.w_vector = w

        if particle.halted:
            u, v, w = 0, 0, 0
        else:
            u += AsaRandom.random() * ((2 * self._horizDisp / modelTimestep) ** 0.5)  # u transformation calculations
            v += AsaRandom.random() * ((2 * self._horizDisp / modelTimestep) ** 0.5)  # v transformation calculations
            w += AsaRandom.random() * ((2 * self._vertDisp / modelTimestep) ** 0.5)  # w transformation calculations

        result = AsaTransport.distance_from_location_using_u_v_w(u=u, v=v, w=w, timestep=modelTimestep, location=particle.location)
        result['u'] = u
        result['v'] = v
        result['w'] = w
        return result
    def __reverse(self, **kwargs):
        """
            Reverse particle just off of the shore in the direction that it came in.
            Adds a slight random factor to the distance and angle it is reversed in.
        """

        #st = time.clock()

        start_point = kwargs.pop('start_point')
        hit_point = kwargs.pop('hit_point')
        reverse_azimuth = kwargs.pop('reverse_azimuth')
        reverse_distance = kwargs.get('reverse_distance', None)
        if reverse_distance is None:
            reverse_distance = 100

        # Randomize the reverse angle slightly (+/- 5 degrees)
        random_azimuth = reverse_azimuth + AsaRandom.random() * 5

        count = 0
        nudge_distance = 0.01
        nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance, azimuth=reverse_azimuth, start_point=hit_point)
        nudge_loc = Location4D(latitude=nudge_point['latitude'], longitude=nudge_point['longitude'], depth=start_point.depth)

        # Find a point just offshore to do testing with.  Try up to 16 times (~650 m).  This makes sure the start point
        # is in the water for the next call to intersect (next while loop).
        while self.intersect(single_point=nudge_loc.point) and count < 16:
            nudge_distance *= 2
            nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance, azimuth=reverse_azimuth, start_point=hit_point)
            nudge_loc = Location4D(latitude=nudge_point['latitude'], longitude=nudge_point['longitude'], depth=start_point.depth)
            count += 1

        # We tried 16 times and couldn't find a point.  This should totally never happen.
        if count == 16:
            logger.warn("LOOK: Could not find location in water to do shoreline calculation with.  Assuming particle did not move from original location")
            return start_point

        # Keep trying to throw the particle back, halving the distance each time, until it is in water.
        # Only halve it 6 times before giving up and returning the point the particle came from.
        count = 0
        # Distance to halve each iteration
        changing_distance = reverse_distance
        new_point = AsaGreatCircle.great_circle(distance=reverse_distance, azimuth=random_azimuth, start_point=hit_point)
        new_loc = Location4D(latitude=new_point['latitude'], longitude=new_point['longitude'], depth=start_point.depth)

        # We don't want to reverse further than the current spatial buffer, because we will reindex the
        # source file every time we reverse, which will slow down the calculations considerably.
        while (not self._spatial_query_object.contains(new_loc.point) or self.intersect(start_point=nudge_loc.point, end_point=new_loc.point)) and count < 6:
            changing_distance /= 2
            new_point = AsaGreatCircle.great_circle(distance=changing_distance, azimuth=random_azimuth, start_point=hit_point)
            new_loc = Location4D(latitude=new_point['latitude'], longitude=new_point['longitude'], depth=start_point.depth)
            count += 1

        # We tried 6 times and the particle was still on shore; return the point the particle started from.
        # No randomization.
        if count == 6:
            logger.warn("LOOK: Could not react particle with shoreline.  Assuming particle did not move from original location")
            return start_point

        #logger.info("Reaction time: %f" % (time.clock() - st))
        return new_loc
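
Both while loops above follow the same back-off idea: start from an initial distance and repeatedly double (for the offshore nudge) or halve (for the reversal) it until a geometric test passes or an iteration cap is hit. A generic sketch of the halving variant, independent of the shoreline classes (the predicate and is_in_water name are stand-ins for the intersect test, not library code):

def halve_until(predicate, start_value, max_tries):
    # Halve start_value until predicate(value) is satisfied or max_tries is exhausted.
    # Returns (value, succeeded); the caller decides what to do on failure, just as
    # __reverse falls back to the original start_point.
    value = start_value
    for _ in range(max_tries):
        if predicate(value):
            return value, True
        value /= 2.0
    return value, False

# Example: find a reversal distance (m) at which a hypothetical is_in_water test passes.
# distance, ok = halve_until(lambda d: is_in_water(d), 100, 6)
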
    def test_create_random_filename(self):
        temp_filename = AsaRandom.filename(prefix="superduper", suffix=".nc")

        path = urlparse(temp_filename).path
        name, ext = os.path.splitext(path)

        assert name.index("superduper") == 0
        assert ext == ".nc"
    def __reverse(self, **kwargs):
        """
            Reverse particle just off of the shore in the direction that it came in.
            Adds a slight random factor to the distance and angle it is reversed in.
        """
        start_point = kwargs.pop('start_point')
        hit_point = kwargs.pop('hit_point')
        distance = kwargs.pop('distance')
        azimuth = kwargs.pop('azimuth')
        reverse_azimuth = kwargs.pop('reverse_azimuth')
        reverse_distance = kwargs.get('reverse_distance', None)
        if reverse_distance is None:
            reverse_distance = 100

        # Randomize the reverse angle slightly (+/- 5 degrees)
        random_azimuth = reverse_azimuth + AsaRandom.random() * 5

        count = 0
        nudge_distance = 0.01
        nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance, azimuth=reverse_azimuth, start_point=hit_point)
        nudge_loc = Location4D(latitude=nudge_point['latitude'], longitude=nudge_point['longitude'], depth=start_point.depth)

        # Find a point just offshore to do testing with.  Try up to 16 times (~650 m).  This makes sure the start point
        # is in the water for the next call to intersect (next while loop).
        while self.intersect(single_point=nudge_loc.point) and count < 16:
            nudge_distance *= 2
            nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance, azimuth=reverse_azimuth, start_point=hit_point)
            nudge_loc = Location4D(latitude=nudge_point['latitude'], longitude=nudge_point['longitude'], depth=start_point.depth)
            count += 1

        # We tried 16 times and couldn't find a point.  This should totally never happen.
        if count == 16:
            logger.debug("WOW. Could not find location in water to do shoreline calculation with.  Assuming particle did not move from original location")
            return start_point

        # Keep trying to throw the particle back, halving the distance each time, until it is in water.
        # Only halve it 12 times before giving up and returning the point the particle came from.
        count = 0
        # Distance to halve each iteration
        changing_distance = reverse_distance
        new_point = AsaGreatCircle.great_circle(distance=reverse_distance, azimuth=random_azimuth, start_point=hit_point)
        new_loc = Location4D(latitude=new_point['latitude'], longitude=new_point['longitude'], depth=start_point.depth)
        while self.intersect(start_point=nudge_loc.point, end_point=new_loc.point) and count < 12:
            changing_distance /= 2
            new_point = AsaGreatCircle.great_circle(distance=changing_distance, azimuth=random_azimuth, start_point=hit_point)
            new_loc = Location4D(latitude=new_point['latitude'], longitude=new_point['longitude'], depth=start_point.depth)
            count += 1

        # We tried 12 times and the particle was still on shore; return the point the particle started from.
        # No randomization.
        if count == 12:
            logger.debug("Could not react particle with shoreline.  Assuming particle did not move from original location")
            return start_point

        return new_loc
    def setup_run(self, **kwargs):

        super(CachingModelController, self).setup_run(**kwargs)

        # Should we remove the cache file at the end of the run?
        self.remove_cache = kwargs.get("remove_cache", False)
        self.cache_path = kwargs.get("cache_path", None)

        # Create a temp file for the cache if nothing was passed in
        if self.cache_path is None:
            default_cache_dir = os.path.join(os.path.dirname(__file__), "_cache")
            temp_name = AsaRandom.filename(prefix=str(datetime.now().microsecond), suffix=".nc")
            self.cache_path = os.path.join(default_cache_dir, temp_name)

        # Be sure the cache directory exists
        if not os.path.exists(os.path.dirname(self.cache_path)):
            logger.info("Creating cache directory: %s" % self.cache_path)
            os.makedirs(os.path.dirname(self.cache_path))

        # Create the shared state objects

        # Particles use this to tell the Data Controller to "get_data".
        # The CachingDataController sets this to False when it is done writing to the cache file.
        # Particles will wait for this to be False before reading from the cache file.
        # If we are caching, this starts as True so the Particles don't take off.  If we
        # are not caching, this is False so the Particles can start immediately.
        self.get_data = self.mgr.Value('bool', True)
        # Particles use this to tell the DataController which indices to 'get_data' for
        self.point_get = self.mgr.Value('list', [0, 0, 0])

        # This locks access to the 'has_data_request_lock' value
        self.data_request_lock = self.mgr.Lock()
        # This tracks which Particle PID is asking the CachingDataController for data
        self.has_data_request_lock = self.mgr.Value('int', -1)

        # The lock that controls access to modifying 'has_read_lock' and 'read_count'
        self.read_lock = self.mgr.Lock()
        # List of Particle PIDs that are reading from the cache
        self.has_read_lock = self.mgr.list()
        # The number of Particles that are reading from the cache
        self.read_count = self.mgr.Value('int', 0)

        # When something is writing to the cache file
        self.write_lock = self.mgr.Lock()
        # PID of process with lock
        self.has_write_lock = self.mgr.Value('int', -1)
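
All of the shared state above lives in multiprocessing.Manager proxies so that worker processes can read and mutate the same values. A minimal, self-contained sketch of that pattern using only the standard library (the reader function and its cache-read placeholder are illustrative, not part of this codebase):

import multiprocessing

def _reader(read_lock, read_count, has_read_lock):
    # Mirror of the particle-side convention: register as a reader under the lock,
    # do the read, then deregister in a finally block so the counter stays correct.
    pid = multiprocessing.current_process().pid
    with read_lock:
        read_count.value += 1
        has_read_lock.append(pid)
    try:
        pass  # read from the cache file here
    finally:
        with read_lock:
            read_count.value -= 1
            has_read_lock.remove(pid)

if __name__ == "__main__":
    mgr = multiprocessing.Manager()
    read_lock = mgr.Lock()
    read_count = mgr.Value('int', 0)
    has_read_lock = mgr.list()
    p = multiprocessing.Process(target=_reader, args=(read_lock, read_count, has_read_lock))
    p.start()
    p.join()
    assert read_count.value == 0 and len(has_read_lock) == 0
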
Example #10
    def __reverse(self, **kwargs):
        """
            Reverse particle just off of the shore in the direction that it came in.
            Adds a slight random factor to the distance and angle it is reversed in.
        """
        start_point = kwargs.pop('start_point')
        hit_point = kwargs.pop('hit_point')
        distance = kwargs.pop('distance')
        azimuth = kwargs.pop('azimuth')
        reverse_azimuth = kwargs.pop('reverse_azimuth')
        reverse_distance = kwargs.get('reverse_distance', None)
        if reverse_distance is None:
            reverse_distance = 100

        # Randomize the reverse angle slightly (+/- 5 degrees)
        random_azimuth = reverse_azimuth + AsaRandom.random() * 5

        # Nudge the hit point off of the shore by a tiny bit to test shoreline intersection in the while loop.
        nudged_hit_point = AsaGreatCircle.great_circle(distance=0.01, azimuth=random_azimuth, start_point=hit_point)
        nudged_hit_location = Location4D(latitude=nudged_hit_point['latitude'], longitude=nudged_hit_point['longitude'], depth=start_point.depth)

        new_point = AsaGreatCircle.great_circle(distance=reverse_distance, azimuth=random_azimuth, start_point=hit_point)
        new_loc = Location4D(latitude=new_point['latitude'], longitude=new_point['longitude'], depth=start_point.depth)

        # Keep trying to throw the particle back, halving the distance each time, until it is in water.
        # Only halve it 10 times before giving up and returning the point the particle came from.
        count = 0
        # Distance to halve each iteration
        changing_distance = reverse_distance
        while self.intersect(start_point=nudged_hit_location.point, end_point=new_loc.point) and count < 10:
            changing_distance /= 2
            new_point = AsaGreatCircle.great_circle(distance=changing_distance, azimuth=random_azimuth, start_point=hit_point)
            new_loc = Location4D(latitude=new_point['latitude'], longitude=new_point['longitude'], depth=start_point.depth)
            count += 1

        # We tried 10 times and the particle was still on shore; return the point the particle started from.
        # No randomization.
        if count == 10:
            logger.warn("Could not react particle with shoreline.  Assuming particle did not move from original location")
            new_loc = start_point

        return new_loc
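
The AsaGreatCircle.great_circle calls above project a destination point from a start point, an azimuth, and a distance; per the move docstrings this is done with the Vincenty Direct method on an ellipsoid. For orientation only, here is a simplified spherical version of the same "direct" problem (not the library's Vincenty implementation; a mean Earth radius is assumed):

import math

EARTH_RADIUS = 6371000.0  # mean Earth radius (m); a spherical simplification

def spherical_destination(lat, lon, azimuth, distance):
    """Destination (lat, lon) in decimal degrees from a start point, an azimuth
    (degrees clockwise from north), and a distance (m), on a sphere."""
    phi1 = math.radians(lat)
    lam1 = math.radians(lon)
    theta = math.radians(azimuth)
    delta = distance / EARTH_RADIUS  # angular distance
    phi2 = math.asin(math.sin(phi1) * math.cos(delta) +
                     math.cos(phi1) * math.sin(delta) * math.cos(theta))
    lam2 = lam1 + math.atan2(math.sin(theta) * math.sin(delta) * math.cos(phi1),
                             math.cos(delta) - math.sin(phi1) * math.sin(phi2))
    return math.degrees(phi2), math.degrees(lam2)

# e.g. 100 m due north of the test start point (38N, 76W) shifts latitude by ~0.0009 degrees.
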
    def run(self, hydrodataset, **kwargs):

        # Add ModelController description to logfile
        logger.info(self)

        # Add the model descriptions to logfile
        for m in self._models:
            logger.info(m)

        # Calculate the model timesteps
        # We need len(times) = self._nstep + 1, since data is stored one timestep
        # after a particle is forced with the final timestep's data.
        times = range(0, (self._step * self._nstep) + 1, self._step)
        # Calculate a datetime object for each model timestep
        # This method is duplicated in DataController and ForceParticle
        # using the 'times' variables above.  Will be useful in those other
        # locations for particles released at different times
        # i.e. released over a few days
        modelTimestep, self.datetimes = AsaTransport.get_time_objects_from_model_timesteps(
            times, start=self.start)

        time_chunk = self._time_chunk
        horiz_chunk = self._horiz_chunk
        low_memory = kwargs.get("low_memory", False)

        # Should we remove the cache file at the end of the run?
        remove_cache = kwargs.get("remove_cache", True)

        self.bathy_path = kwargs.get("bathy", None)

        self.cache_path = kwargs.get("cache", None)
        if self.cache_path is None:
            # Generate temp filename for dataset cache
            default_cache_dir = os.path.join(os.path.dirname(__file__),
                                             "_cache")
            temp_name = AsaRandom.filename(prefix=str(
                datetime.now().microsecond),
                                           suffix=".nc")
            self.cache_path = os.path.join(default_cache_dir, temp_name)

        logger.progress((1, "Setting up particle start locations"))
        point_locations = []
        if isinstance(self.geometry, Point):
            point_locations = [self.reference_location] * self._npart
        elif isinstance(self.geometry, Polygon) or isinstance(
                self.geometry, MultiPolygon):
            point_locations = [
                Location4D(latitude=loc.y,
                           longitude=loc.x,
                           depth=self._depth,
                           time=self.start)
                for loc in AsaTransport.fill_polygon_with_points(
                    goal=self._npart, polygon=self.geometry)
            ]

        # Initialize the particles
        logger.progress((2, "Initializing particles"))
        for x in xrange(0, self._npart):
            p = LarvaParticle(id=x)
            p.location = point_locations[x]
            # We don't need to fill the location gaps here for environment variables
            # because the first data collected actually relates to this original
            # position.
            # We do need to fill in fields such as settled, halted, etc.
            p.fill_status_gap()
            # Set the initial note
            p.note = p.outputstring()
            p.notes.append(p.note)
            self.particles.append(p)

        # This is where it makes sense to implement the multiprocessing
        # looping for particles and models. Can handle each particle in
        # parallel probably.
        #
        # Get the number of cores (may take some tuning) and create that
        # many workers then pass particles into the queue for the workers
        mgr = multiprocessing.Manager()
        nproc = multiprocessing.cpu_count() - 1
        if nproc <= 0:
            raise ValueError(
                "Model does not run using less than two CPU cores")

        # Each particle is a task, plus the DataController
        number_of_tasks = len(self.particles) + 1

        # We need a process for each particle and one for the data controller
        nproc = min(number_of_tasks, nproc)

        # When a particle requests data
        data_request_lock = mgr.Lock()
        # PID of process with lock
        has_data_request_lock = mgr.Value('int', -1)

        nproc_lock = mgr.Lock()

        # Create the task queue for all of the particles and the DataController
        tasks = multiprocessing.JoinableQueue(number_of_tasks)
        # Create the result queue for all of the particles and the DataController
        results = mgr.Queue(number_of_tasks)

        # Create the shared state objects
        get_data = mgr.Value('bool', True)
        # Number of tasks
        n_run = mgr.Value('int', number_of_tasks)
        updating = mgr.Value('bool', False)

        # When something is reading from cache file
        read_lock = mgr.Lock()
        # list of PIDs that are reading
        has_read_lock = mgr.list()
        read_count = mgr.Value('int', 0)

        # When something is writing to the cache file
        write_lock = mgr.Lock()
        # PID of process with lock
        has_write_lock = mgr.Value('int', -1)

        point_get = mgr.Value('list', [0, 0, 0])
        active = mgr.Value('bool', True)

        logger.progress((3, "Initializing and caching hydro model's grid"))
        try:
            ds = CommonDataset.open(hydrodataset)
            # Query the dataset for common variable names
            # and the time variable.
            logger.debug("Retrieving variable information from dataset")
            common_variables = self.get_common_variables_from_dataset(ds)

            logger.debug("Pickling time variable to disk for particles")
            timevar = ds.gettimevar(common_variables.get("u"))
            f, timevar_pickle_path = tempfile.mkstemp()
            os.close(f)
            f = open(timevar_pickle_path, "wb")
            pickle.dump(timevar, f)
            f.close()
            ds.closenc()
        except:
            logger.warn("Failed to access remote dataset %s" % hydrodataset)
            raise DataControllerError("Inaccessible DAP endpoint: %s" %
                                      hydrodataset)

        # Add data controller to the queue first so that it
        # can get the initial data and is not blocked

        logger.debug('Starting DataController')
        logger.progress((4, "Starting processes"))
        data_controller = parallel.DataController(hydrodataset,
                                                  common_variables,
                                                  n_run,
                                                  get_data,
                                                  write_lock,
                                                  has_write_lock,
                                                  read_lock,
                                                  read_count,
                                                  time_chunk,
                                                  horiz_chunk,
                                                  times,
                                                  self.start,
                                                  point_get,
                                                  self.reference_location,
                                                  low_memory=low_memory,
                                                  cache=self.cache_path)
        tasks.put(data_controller)
        # Create DataController worker
        data_controller_process = parallel.Consumer(tasks,
                                                    results,
                                                    n_run,
                                                    nproc_lock,
                                                    active,
                                                    get_data,
                                                    name="DataController")
        data_controller_process.start()

        logger.debug('Adding %i particles as tasks' % len(self.particles))
        for part in self.particles:
            forcing = parallel.ForceParticle(
                part,
                hydrodataset,
                common_variables,
                timevar_pickle_path,
                times,
                self.start,
                self._models,
                self.reference_location.point,
                self._use_bathymetry,
                self._use_shoreline,
                self._use_seasurface,
                get_data,
                n_run,
                read_lock,
                has_read_lock,
                read_count,
                point_get,
                data_request_lock,
                has_data_request_lock,
                reverse_distance=self.reverse_distance,
                bathy=self.bathy_path,
                shoreline_path=self.shoreline_path,
                cache=self.cache_path,
                time_method=self.time_method)
            tasks.put(forcing)

        # Create workers for the particles.
        procs = [
            parallel.Consumer(tasks,
                              results,
                              n_run,
                              nproc_lock,
                              active,
                              get_data,
                              name="ForceParticle-%d" % i)
            for i in xrange(nproc - 1)
        ]
        for w in procs:
            w.start()
            logger.debug('Started %s' % w.name)

        # Get results back from queue, test for failed particles
        return_particles = []
        retrieved = 0.
        error_code = 0

        logger.info("Waiting for %i particle results" % len(self.particles))
        logger.progress((5, "Running model"))
        while retrieved < number_of_tasks:
            try:
                # Returns a tuple of code, result
                code, tempres = results.get(timeout=240)
            except Queue.Empty:
                # Poll the active processes to make sure they are all alive and then continue with loop
                if not data_controller_process.is_alive(
                ) and data_controller_process.exitcode != 0:
                    # Data controller is zombied, kill off other processes.
                    get_data.value = False
                    results.put((-2, "DataController"))

                new_procs = []
                old_procs = []
                for p in procs:
                    if not p.is_alive() and p.exitcode != 0:
                        # Do what the Consumer would do if something finished.
                        # Add something to results queue
                        results.put((-3, "ZombieParticle"))
                        # Decrement nproc (DataController exits when this is 0)
                        with nproc_lock:
                            n_run.value = n_run.value - 1

                        # Remove task from queue (so they can be joined later on)
                        tasks.task_done()

                        # Start a new Consumer.  It will exit if there are no tasks available.
                        np = parallel.Consumer(tasks,
                                               results,
                                               n_run,
                                               nproc_lock,
                                               active,
                                               get_data,
                                               name=p.name)
                        new_procs.append(np)
                        old_procs.append(p)

                        # Release any locks the PID had
                        if p.pid in has_read_lock:
                            with read_lock:
                                read_count.value -= 1
                                has_read_lock.remove(p.pid)

                        if has_data_request_lock.value == p.pid:
                            has_data_request_lock.value = -1
                            try:
                                data_request_lock.release()
                            except:
                                pass

                        if has_write_lock.value == p.pid:
                            has_write_lock.value = -1
                            try:
                                write_lock.release()
                            except:
                                pass

                for p in old_procs:
                    try:
                        procs.remove(p)
                    except ValueError:
                        logger.warn(
                            "Did not find %s in the list of processes.  Continuing on."
                            % p.name)

                for p in new_procs:
                    procs.append(p)
                    logger.warn(
                        "Started a new consumer (%s) to replace a zombie consumer"
                        % p.name)
                    p.start()

            else:
                # We got one.
                retrieved += 1
                if code is None:
                    logger.warn("Got an unrecognized response from a task.")
                elif code == -1:
                    logger.warn("Particle %s has FAILED!!" % tempres.uid)
                elif code == -2:
                    error_code = code
                    logger.warn(
                        "DataController has FAILED!!  Removing cache file so the particles fail."
                    )
                    try:
                        os.remove(self.cache_path)
                    except OSError:
                        logger.debug(
                            "Could not remove cache file, it probably never existed"
                        )
                        pass
                elif code == -3:
                    error_code = code
                    logger.info(
                        "A zombie process was caught and task was removed from queue"
                    )
                elif isinstance(tempres, Particle):
                    logger.info("Particle %d finished" % tempres.uid)
                    return_particles.append(tempres)
                    # We multiply by 90 here to save 10% for the exporting
                    logger.progress(
                        (round((retrieved / number_of_tasks) * 90.,
                               1), "Particle %d finished" % tempres.uid))
                elif tempres == "DataController":
                    logger.info("DataController finished")
                    logger.progress((round((retrieved / number_of_tasks) * 90.,
                                           1), "DataController finished"))
                else:
                    logger.info("Got a strange result on results queue")
                    logger.info(str(tempres))

                logger.info("Retrieved %i/%i results" %
                            (int(retrieved), number_of_tasks))

        if len(return_particles) != len(self.particles):
            logger.warn(
                "Some particles failed and are not included in the output")

        # The results queue should be empty at this point
        assert results.empty() is True

        # Should be good to join on the tasks now that the queue is empty
        logger.info("Joining the task queue")
        tasks.join()

        # Join all processes
        logger.info("Joining the processes")
        for w in procs + [data_controller_process]:
            # Wait 10 seconds
            w.join(10.)
            if w.is_alive():
                # Process is hanging, kill it.
                logger.info(
                    "Terminating %s forcefully.  This should have exited itself."
                    % w.name)
                w.terminate()

        logger.info('Workers complete')

        self.particles = return_particles

        # Remove Manager so it shuts down
        del mgr

        # Remove pickled timevar
        os.remove(timevar_pickle_path)

        # Remove the cache file
        if remove_cache is True:
            try:
                os.remove(self.cache_path)
            except OSError:
                logger.debug(
                    "Could not remove cache file, it probably never existed")

        logger.progress((96, "Exporting results"))

        if len(self.particles) > 0:
            # If output_formats and path specified,
            # output particle run data to disk when completed
            if "output_formats" in kwargs:
                # Make sure output_path is also included
                if kwargs.get("output_path", None) != None:
                    formats = kwargs.get("output_formats")
                    output_path = kwargs.get("output_path")
                    if isinstance(formats, list):
                        for format in formats:
                            logger.info("Exporting to: %s" % format)
                            try:
                                self.export(output_path, format=format)
                            except:
                                logger.error("Failed to export to: %s" %
                                             format)
                    else:
                        logger.warn(
                            'The output_formats parameter should be a list, not saving any output!'
                        )
                else:
                    logger.warn(
                        'No output path defined, not saving any output!')
            else:
                logger.warn('No output format defined, not saving any output!')
        else:
            logger.warn("Model didn't actually do anything, check the log.")
            if error_code == -2:
                raise DataControllerError("Error in the DataController")
            else:
                raise ModelError("Error in the model")

        logger.progress((99, "Model Run Complete"))
        return
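
The worker plumbing above is the standard JoinableQueue producer/consumer pattern: tasks go onto a joinable queue, each worker calls task_done() per task, and the parent joins the queue and then the processes. A stripped-down sketch of that pattern with the standard library only (EchoTask and the sentinel handling are illustrative, not the library's parallel.Consumer):

import multiprocessing

class EchoTask(object):
    def __init__(self, n):
        self.n = n
    def __call__(self):
        return self.n * self.n

def consumer(tasks, results):
    # Pull tasks until a None sentinel arrives; mark every queue item done so that
    # tasks.join() in the parent can return.
    while True:
        task = tasks.get()
        if task is None:
            tasks.task_done()
            break
        results.put((0, task()))
        tasks.task_done()

if __name__ == "__main__":
    tasks = multiprocessing.JoinableQueue()
    mgr = multiprocessing.Manager()
    results = mgr.Queue()
    workers = [multiprocessing.Process(target=consumer, args=(tasks, results)) for _ in range(2)]
    for w in workers:
        w.start()
    for n in range(4):
        tasks.put(EchoTask(n))
    for _ in workers:
        tasks.put(None)   # one sentinel per worker
    tasks.join()          # blocks until every task_done() has been called
    for w in workers:
        w.join()
    while not results.empty():
        print(results.get())
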
    def run(self, hydrodataset, **kwargs):

        # Add ModelController description to logfile
        logger.info(self)

        # Add the model descriptions to logfile
        for m in self._models:
            logger.info(m)

        # Calculate the model timesteps
        # We need len(times) = self._nstep + 1, since data is stored one timestep
        # after a particle is forced with the final timestep's data.
        times = range(0, (self._step * self._nstep) + 1, self._step)
        # Calculate a datetime object for each model timestep
        # This method is duplicated in DataController and ForceParticle
        # using the 'times' variables above.  Will be useful in those other
        # locations for particles released at different times
        # i.e. released over a few days
        modelTimestep, self.datetimes = AsaTransport.get_time_objects_from_model_timesteps(times, start=self.start)

        time_chunk = self._time_chunk
        horiz_chunk = self._horiz_chunk
        low_memory = kwargs.get("low_memory", False)

        # Should we remove the cache file at the end of the run?
        remove_cache = kwargs.get("remove_cache", True)

        self.bathy_path = kwargs.get("bathy", None)

        self.cache_path = kwargs.get("cache", None)
        if self.cache_path is None:
            # Generate temp filename for dataset cache
            default_cache_dir = os.path.join(os.path.dirname(__file__), "_cache")
            temp_name = AsaRandom.filename(prefix=str(datetime.now().microsecond), suffix=".nc")
            self.cache_path = os.path.join(default_cache_dir, temp_name)
        
        logger.progress((1, "Setting up particle start locations"))
        point_locations = []
        if isinstance(self.geometry, Point):
            point_locations = [self.reference_location] * self._npart
        elif isinstance(self.geometry, Polygon) or isinstance(self.geometry, MultiPolygon):
            point_locations = [Location4D(latitude=loc.y, longitude=loc.x, depth=self._depth, time=self.start) for loc in AsaTransport.fill_polygon_with_points(goal=self._npart, polygon=self.geometry)]

        # Initialize the particles
        logger.progress((2, "Initializing particles"))
        for x in xrange(0, self._npart):
            p = LarvaParticle(id=x)
            p.location = point_locations[x]
            # We don't need to fill the location gaps here for environment variables
            # because the first data collected actually relates to this original
            # position.
            # We do need to fill in fields such as settled, halted, etc.
            p.fill_status_gap()
            # Set the initial note
            p.note = p.outputstring()
            p.notes.append(p.note)
            self.particles.append(p)

        # This is where it makes sense to implement the multiprocessing
        # looping for particles and models. Can handle each particle in 
        # parallel probably.
        #
        # Get the number of cores (may take some tuning) and create that
        # many workers then pass particles into the queue for the workers
        mgr = multiprocessing.Manager()
        nproc = multiprocessing.cpu_count() - 1
        if nproc <= 0:
            raise ValueError("Model does not run using less than two CPU cores")

        # Each particle is a task, plus the DataController
        number_of_tasks = len(self.particles) + 1

        # We need a process for each particle and one for the data controller
        nproc = min(number_of_tasks, nproc)

        # When a particle requests data
        data_request_lock = mgr.Lock()
        # PID of process with lock
        has_data_request_lock = mgr.Value('int',-1)

        nproc_lock = mgr.Lock()
        
        # Create the task queue for all of the particles and the DataController
        tasks = multiprocessing.JoinableQueue(number_of_tasks)
        # Create the result queue for all of the particles and the DataController
        results = mgr.Queue(number_of_tasks)
        
        # Create the shared state objects
        get_data = mgr.Value('bool', True)
        # Number of tasks
        n_run = mgr.Value('int', number_of_tasks)
        updating = mgr.Value('bool', False)

        # When something is reading from cache file
        read_lock = mgr.Lock()
        # list of PIDs that are reading
        has_read_lock = mgr.list()
        read_count = mgr.Value('int', 0)

        # When something is writing to the cache file
        write_lock = mgr.Lock()
        # PID of process with lock
        has_write_lock = mgr.Value('int',-1)

        point_get = mgr.Value('list', [0, 0, 0])
        active = mgr.Value('bool', True)
        
        logger.progress((3, "Initializing and caching hydro model's grid"))
        try:
            ds = CommonDataset.open(hydrodataset)
            # Query the dataset for common variable names
            # and the time variable.
            logger.debug("Retrieving variable information from dataset")
            common_variables = self.get_common_variables_from_dataset(ds)

            logger.debug("Pickling time variable to disk for particles")
            timevar = ds.gettimevar(common_variables.get("u"))
            f, timevar_pickle_path = tempfile.mkstemp()
            os.close(f)
            f = open(timevar_pickle_path, "wb")
            pickle.dump(timevar, f)
            f.close()
            ds.closenc()
        except:
            logger.warn("Failed to access remote dataset %s" % hydrodataset)
            raise DataControllerError("Inaccessible DAP endpoint: %s" % hydrodataset)


        # Add data controller to the queue first so that it 
        # can get the initial data and is not blocked
        
        logger.debug('Starting DataController')
        logger.progress((4, "Starting processes"))
        data_controller = parallel.DataController(hydrodataset, common_variables, n_run, get_data, write_lock, has_write_lock, read_lock, read_count,
                                                  time_chunk, horiz_chunk, times,
                                                  self.start, point_get, self.reference_location,
                                                  low_memory=low_memory,
                                                  cache=self.cache_path)
        tasks.put(data_controller)
        # Create DataController worker
        data_controller_process = parallel.Consumer(tasks, results, n_run, nproc_lock, active, get_data, name="DataController")
        data_controller_process.start()
        
        logger.debug('Adding %i particles as tasks' % len(self.particles))
        for part in self.particles:
            forcing = parallel.ForceParticle(part,
                                        hydrodataset,
                                        common_variables,
                                        timevar_pickle_path,
                                        times,
                                        self.start,
                                        self._models,
                                        self.reference_location.point,
                                        self._use_bathymetry,
                                        self._use_shoreline,
                                        self._use_seasurface,
                                        get_data,
                                        n_run,
                                        read_lock,
                                        has_read_lock,
                                        read_count,
                                        point_get,
                                        data_request_lock,
                                        has_data_request_lock,
                                        reverse_distance=self.reverse_distance,
                                        bathy=self.bathy_path,
                                        shoreline_path=self.shoreline_path,
                                        shoreline_feature=self.shoreline_feature,
                                        cache=self.cache_path,
                                        time_method=self.time_method)
            tasks.put(forcing)

        # Create workers for the particles.
        procs = [ parallel.Consumer(tasks, results, n_run, nproc_lock, active, get_data, name="ForceParticle-%d"%i)
                  for i in xrange(nproc - 1) ]
        for w in procs:
            w.start()
            logger.debug('Started %s' % w.name)

        # Get results back from queue, test for failed particles
        return_particles = []
        retrieved = 0.
        error_code = 0

        logger.info("Waiting for %i particle results" % len(self.particles))
        logger.progress((5, "Running model"))
        while retrieved < number_of_tasks:
            try:
                # Returns a tuple of code, result
                code, tempres = results.get(timeout=240)
            except Queue.Empty:
                # Poll the active processes to make sure they are all alive and then continue with loop
                if not data_controller_process.is_alive() and data_controller_process.exitcode != 0:
                    # Data controller is zombied, kill off other processes.
                    get_data.value = False
                    results.put((-2, "DataController"))

                new_procs = []
                old_procs = []
                for p in procs:
                    if not p.is_alive() and p.exitcode != 0:
                        # Do what the Consumer would do if something finished.
                        # Add something to results queue
                        results.put((-3, "ZombieParticle"))
                        # Decrement nproc (DataController exits when this is 0)
                        with nproc_lock:
                            n_run.value = n_run.value - 1

                        # Remove task from queue (so they can be joined later on)
                        tasks.task_done()

                        # Start a new Consumer.  It will exit if there are no tasks available.
                        np = parallel.Consumer(tasks, results, n_run, nproc_lock, active, get_data, name=p.name)
                        new_procs.append(np)
                        old_procs.append(p)
                        
                        # Release any locks the PID had
                        if p.pid in has_read_lock:
                            with read_lock:
                                read_count.value -= 1
                                has_read_lock.remove(p.pid)

                        if has_data_request_lock.value == p.pid:
                            has_data_request_lock.value = -1
                            try:
                                data_request_lock.release()
                            except:
                                pass
                            
                        if has_write_lock.value == p.pid:
                            has_write_lock.value = -1
                            try:
                                write_lock.release()
                            except:
                                pass
                            

                for p in old_procs:
                    try:
                        procs.remove(p)
                    except ValueError:
                        logger.warn("Did not find %s in the list of processes.  Continuing on." % p.name)

                for p in new_procs:
                    procs.append(p)
                    logger.warn("Started a new consumer (%s) to replace a zombie consumer" % p.name)
                    p.start()
                
            else:
                # We got one.
                retrieved += 1
                if code is None:
                    logger.warn("Got an unrecognized response from a task.")
                elif code == -1:
                    logger.warn("Particle %s has FAILED!!" % tempres.uid)
                elif code == -2:
                    error_code = code
                    logger.warn("DataController has FAILED!!  Removing cache file so the particles fail.")
                    try:
                        os.remove(self.cache_path)
                    except OSError:
                        logger.debug("Could not remove cache file, it probably never existed")
                        pass
                elif code == -3:
                    error_code = code
                    logger.info("A zombie process was caught and task was removed from queue")
                elif isinstance(tempres, Particle):
                    logger.info("Particle %d finished" % tempres.uid)
                    return_particles.append(tempres)
                    # We multiply by 90 here to save 10% for the exporting
                    logger.progress((round((retrieved / number_of_tasks) * 90.,1), "Particle %d finished" % tempres.uid))
                elif tempres == "DataController":
                    logger.info("DataController finished")
                    logger.progress((round((retrieved / number_of_tasks) * 90.,1), "DataController finished"))
                else:
                    logger.info("Got a strange result on results queue")
                    logger.info(str(tempres))

                logger.info("Retrieved %i/%i results" % (int(retrieved),number_of_tasks))
        
        if len(return_particles) != len(self.particles):
            logger.warn("Some particles failed and are not included in the output")

        # The results queue should be empty at this point
        assert results.empty() is True

        # Should be good to join on the tasks now that the queue is empty
        logger.info("Joining the task queue")
        tasks.join()

        # Join all processes
        logger.info("Joining the processes")
        for w in procs + [data_controller_process]:
            # Wait 10 seconds
            w.join(10.)
            if w.is_alive():
                # Process is hanging, kill it.
                logger.info("Terminating %s forcefully.  This should have exited itself." % w.name)
                w.terminate()
                    
        logger.info('Workers complete')

        self.particles = return_particles

        # Remove Manager so it shuts down
        del mgr

        # Remove pickled timevar
        os.remove(timevar_pickle_path)

        # Remove the cache file
        if remove_cache is True:
            try:
                os.remove(self.cache_path)
            except OSError:
                logger.debug("Could not remove cache file, it probably never existed")

        logger.progress((96, "Exporting results"))

        if len(self.particles) > 0:
            # If output_formats and path specified,
            # output particle run data to disk when completed
            if "output_formats" in kwargs:
                # Make sure output_path is also included
                if kwargs.get("output_path", None) != None:
                    formats = kwargs.get("output_formats")
                    output_path = kwargs.get("output_path")
                    if isinstance(formats, list):
                        for format in formats:
                            logger.info("Exporting to: %s" % format)
                            try:
                                self.export(output_path, format=format)
                            except:
                                logger.error("Failed to export to: %s" % format)
                    else:
                        logger.warn('The output_formats parameter should be a list, not saving any output!')  
                else:
                    logger.warn('No output path defined, not saving any output!')  
            else:
                logger.warn('No output format defined, not saving any output!')
        else:
            logger.warn("Model didn't actually do anything, check the log.")
            if error_code == -2:
                raise DataControllerError("Error in the DataController")
            else:
                raise ModelError("Error in the model")

        logger.progress((99, "Model Run Complete"))
        return
    def __reverse(self, **kwargs):
        """
            Reverse particle just off of the shore in the direction that it came in.
            Adds a slight random factor to the distance and angle it is reversed in.
        """
        start_point = kwargs.pop('start_point')
        hit_point = kwargs.pop('hit_point')
        distance = kwargs.pop('distance')
        azimuth = kwargs.pop('azimuth')
        reverse_azimuth = kwargs.pop('reverse_azimuth')
        reverse_distance = kwargs.get('reverse_distance', None)
        if reverse_distance is None:
            reverse_distance = 100

        # Randomize the reverse angle slightly (+/- 5 degrees)
        random_azimuth = reverse_azimuth + AsaRandom.random() * 5

        count = 0
        nudge_distance = 0.01
        nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance,
                                                  azimuth=reverse_azimuth,
                                                  start_point=hit_point)
        nudge_loc = Location4D(latitude=nudge_point['latitude'],
                               longitude=nudge_point['longitude'],
                               depth=start_point.depth)

        # Find a point just offshore to do testing with.  Try up to 16 times (~650 m).  This makes sure the start point
        # is in the water for the next call to intersect (next while loop).
        while self.intersect(single_point=nudge_loc.point) and count < 16:
            nudge_distance *= 2
            nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance,
                                                      azimuth=reverse_azimuth,
                                                      start_point=hit_point)
            nudge_loc = Location4D(latitude=nudge_point['latitude'],
                                   longitude=nudge_point['longitude'],
                                   depth=start_point.depth)
            count += 1

        # We tried 16 times and couldn't find a point.  This should totally never happen.
        if count == 16:
            logger.debug(
                "WOW. Could not find location in water to do shoreline calculation with.  Assuming particle did not move from original location"
            )
            return start_point

        # Keep trying to throw the particle back, halving the distance each time, until it is in water.
        # Only halve it 12 times before giving up and returning the point the particle came from.
        count = 0
        # Distance to halve each iteration
        changing_distance = reverse_distance
        new_point = AsaGreatCircle.great_circle(distance=reverse_distance,
                                                azimuth=random_azimuth,
                                                start_point=hit_point)
        new_loc = Location4D(latitude=new_point['latitude'],
                             longitude=new_point['longitude'],
                             depth=start_point.depth)
        while self.intersect(start_point=nudge_loc.point,
                             end_point=new_loc.point) and count < 12:
            changing_distance /= 2
            new_point = AsaGreatCircle.great_circle(distance=changing_distance,
                                                    azimuth=random_azimuth,
                                                    start_point=hit_point)
            new_loc = Location4D(latitude=new_point['latitude'],
                                 longitude=new_point['longitude'],
                                 depth=start_point.depth)
            count += 1

        # We tried 12 times and the particle was still on shore; return the point the particle started from.
        # No randomization.
        if count == 12:
            logger.debug(
                "Could not react particle with shoreline.  Assuming particle did not move from original location"
            )
            return start_point

        return new_loc
Example #14
    def setup_run(self, hydrodataset, **kwargs):

        super(CachingModelController, self).setup_run(hydrodataset, **kwargs)

        # Get the number of cores (may take some tuning) and create that
        # many workers then pass particles into the queue for the workers
        self.mgr = multiprocessing.Manager()

        # This tracks whether the system is 'alive'.  Most while loops check this
        # and break out if it is False.  It stays True until something goes very wrong.
        self.active = self.mgr.Value('bool', True)

        # Spin up either one worker per core (minus one) or one per task, whichever is fewer
        self.nproc = min(multiprocessing.cpu_count() - 1,
                         self.total_task_count())

        # Number of tasks that we need to run.  This is decremented every time something exits.
        self.n_run = self.mgr.Value('int', self.total_task_count())

        # The lock that controls access to the 'n_run' variable
        self.nproc_lock = self.mgr.Lock()

        # Create the task queue for all of the particles and the CachingDataController
        self.tasks = multiprocessing.JoinableQueue(self.total_task_count())

        # Create the result queue for all of the particles and the CachingDataController
        self.results = self.mgr.Queue(self.total_task_count())

        # Should we remove the cache file at the end of the run?
        self.remove_cache = kwargs.get("remove_cache", False)
        self.cache_path = kwargs.get("cache_path", None)

        # Create a temp file for the cache if nothing was passed in
        if self.cache_path is None:
            default_cache_dir = os.path.join(os.path.dirname(__file__),
                                             "_cache")
            temp_name = AsaRandom.filename(prefix=str(datetime.now().microsecond), suffix=".nc")
            self.cache_path = os.path.join(default_cache_dir, temp_name)

        # Be sure the cache directory exists
        if not os.path.exists(os.path.dirname(self.cache_path)):
            logger.info("Creating cache directory: %s" % self.cache_path)
            os.makedirs(os.path.dirname(self.cache_path))

        # Create the shared state objects

        # Particles use this to tell the Data Controller to "get_data".
        # The CachingDataController sets this to False when it is done writing to the cache file.
        # Particles will wait for this to be False before reading from the cache file.
        # If we are caching, this starts as True so the Particles don't take off.  If we
        # are not caching, this is False so the Particles can start immediately.
        self.get_data = self.mgr.Value('bool', True)
        # Particles use this to tell the DataController which indices to 'get_data' for
        self.point_get = self.mgr.Value('list', [0, 0, 0])

        # This locks access to the 'has_data_request_lock' value
        self.data_request_lock = self.mgr.Lock()
        # This tracks which Particle PID is asking the CachingDataController for data
        self.has_data_request_lock = self.mgr.Value('int', -1)

        # The lock that controls access to modifying 'has_read_lock' and 'read_count'
        self.read_lock = self.mgr.Lock()
        # List of Particle PIDs that are reading from the cache
        self.has_read_lock = self.mgr.list()
        # The number of Particles that are reading from the cache
        self.read_count = self.mgr.Value('int', 0)

        # Held while something is writing to the cache file (a reader/writer usage sketch follows this example)
        self.write_lock = self.mgr.Lock()
        # PID of process with lock
        self.has_write_lock = self.mgr.Value('int', -1)
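The shared state created above reads like a reader/writer protocol around the cache file: reading particles register themselves under read_lock and bump read_count, while a single writer holds write_lock and records its PID in has_write_lock.  The actual call sites live in the particle and data-controller code, so the helpers below are only a guess at how these proxies are meant to be used (hypothetical names, not library code):

# Hypothetical worker-side helpers showing one way the shared proxies
# created in setup_run() could be used.  Names and call sites are assumed.

def read_from_cache(controller, pid, read_func):
    # Register as a reader so a writer can tell the cache file is in use.
    with controller.read_lock:
        controller.read_count.value += 1
        controller.has_read_lock.append(pid)
    try:
        return read_func(controller.cache_path)
    finally:
        with controller.read_lock:
            controller.read_count.value -= 1
            controller.has_read_lock.remove(pid)

def write_to_cache(controller, pid, write_func):
    # Only one writer at a time; record which PID currently holds the lock.
    with controller.write_lock:
        controller.has_write_lock.value = pid
        try:
            write_func(controller.cache_path)
        finally:
            controller.has_write_lock.value = -1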
Exemple #15
0
    def setup_run(self, **kwargs):

        super(CachingModelController, self).setup_run(**kwargs)

        # Get the number of cores (may take some tuning) and create that
        # many workers then pass particles into the queue for the workers
        self.mgr = multiprocessing.Manager()

        # This tracks whether the system is 'alive'.  Most while loops check this
        # and break out if it is False.  It stays True until something goes very wrong.
        self.active = self.mgr.Value('bool', True)

        # Spin up either one worker per core (minus one) or one per task, whichever is fewer
        self.nproc = min(multiprocessing.cpu_count() - 1, self.number_of_tasks)

        # Number of tasks that we need to run.  This is decremented every time something exits.
        self.n_run = self.mgr.Value('int', self.number_of_tasks)

        # The lock that controls access to the 'n_run' variable
        self.nproc_lock = self.mgr.Lock()

        # Create the task queue for all of the particles and the CachingDataController
        self.tasks = multiprocessing.JoinableQueue(self.number_of_tasks)

        # Create the result queue for all of the particles and the CachingDataController
        self.results = self.mgr.Queue(self.number_of_tasks)

        # Should we remove the cache file at the end of the run?
        self.remove_cache        = kwargs.get("remove_cache", False)
        self.cache_path          = kwargs.get("cache_path", None)

        # Create a temp file for the cache if nothing was passed in
        if self.cache_path is None:
            default_cache_dir = os.path.join(os.path.dirname(__file__), "_cache")
            temp_name = AsaRandom.filename(prefix=str(datetime.now().microsecond), suffix=".nc")
            self.cache_path = os.path.join(default_cache_dir, temp_name)

        # Be sure the cache directory exists
        if not os.path.exists(os.path.dirname(self.cache_path)):
            logger.info("Creating cache directory: %s" % self.cache_path)
            os.makedirs(os.path.dirname(self.cache_path))

        # Create the shared state objects

        # Particles use this to tell the Data Controller to "get_data".
        # The CachingDataController sets this to False when it is done writing to the cache file.
        # Particles will wait for this to be False before reading from the cache file.
        # If we are caching, this starts as True so the Particles don't take off.  If we
        # are not caching, this is False so the Particles can start immediately.
        self.get_data = self.mgr.Value('bool', True)
        # Particles use this to tell the DataController which indices to 'get_data' for
        self.point_get = self.mgr.Value('list', [0, 0, 0])

        # This locks access to the 'has_data_request_lock' value
        self.data_request_lock = self.mgr.Lock()
        # This tracks which Particle PID is asking the CachingDataController for data
        self.has_data_request_lock = self.mgr.Value('int', -1)

        # The lock that controls access to modifying 'has_read_lock' and 'read_count'
        self.read_lock = self.mgr.Lock()
        # List of Particle PIDs that are reading from the cache
        self.has_read_lock = self.mgr.list()
        # The number of Particles that are reading from the cache
        self.read_count = self.mgr.Value('int', 0)

        # Held while something is writing to the cache file
        self.write_lock = self.mgr.Lock()
        # PID of process with lock
        self.has_write_lock = self.mgr.Value('int', -1)
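Both setup_run variants above create a JoinableQueue for tasks and a managed Queue for results but do not show how they are fed or drained.  The dispatch loop lives elsewhere in the controller; one plausible shape for it, with assumed names (not the library's actual code):

    # Illustrative feed/drain of the queues created in setup_run().  The
    # task_list argument and one-result-per-task assumption are not from the library.
    def dispatch(self, task_list):
        for task in task_list:
            self.tasks.put(task)      # a worker is expected to call task_done() per item

        self.tasks.join()             # block until every task has been marked done

        # Collect one result per task from the managed result queue.
        return [self.results.get() for _ in task_list]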
Exemple #16
0
    def __reverse(self, **kwargs):
        """
            Reverse the particle to a point just off of the shore, back in the direction that it came from.
            Adds a slight random factor (+/- 5 degrees) to the azimuth it is reversed along.
            (A hypothetical caller showing the expected keyword arguments is sketched after this example.)
        """

        #st = time.clock()

        start_point = kwargs.pop('start_point')
        hit_point = kwargs.pop('hit_point')
        reverse_azimuth = kwargs.pop('reverse_azimuth')
        reverse_distance = kwargs.get('reverse_distance', None)
        if reverse_distance is None:
            reverse_distance = 100

        # Randomize the reverse angle slightly (+/- 5 degrees)
        random_azimuth = reverse_azimuth + AsaRandom.random() * 5

        count = 0
        nudge_distance = 0.01
        nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance,
                                                  azimuth=reverse_azimuth,
                                                  start_point=hit_point)
        nudge_loc = Location4D(latitude=nudge_point['latitude'],
                               longitude=nudge_point['longitude'],
                               depth=start_point.depth)

        # Find a point just offshore to do testing with, doubling the nudge distance up to 16 times (~650 m max).
        # This makes sure the start_point is in the water for the next call to intersect (the while loop below).
        while self.intersect(single_point=nudge_loc.point) and count < 16:
            nudge_distance *= 2
            nudge_point = AsaGreatCircle.great_circle(distance=nudge_distance,
                                                      azimuth=reverse_azimuth,
                                                      start_point=hit_point)
            nudge_loc = Location4D(latitude=nudge_point['latitude'],
                                   longitude=nudge_point['longitude'],
                                   depth=start_point.depth)
            count += 1

        # We tried 16 times and couldn't find a point.  This should totally never happen.
        if count == 16:
            logger.warn(
                "LOOK: Could not find location in water to do shoreline calculation with.  Assuming particle did not move from original location"
            )
            return start_point

        # Keep trying to throw the particle back, halving the distance each time until it is in water.
        # Only halve it 6 times before giving up and returning the point the particle came from.
        count = 0
        # Distance amount to half each iteration
        changing_distance = reverse_distance
        new_point = AsaGreatCircle.great_circle(distance=reverse_distance,
                                                azimuth=random_azimuth,
                                                start_point=hit_point)
        new_loc = Location4D(latitude=new_point['latitude'],
                             longitude=new_point['longitude'],
                             depth=start_point.depth)

        # We don't want to reverse further than the current spatial buffer, because we will reindex the
        # source file every time we reverse, which will slow down the calculations considerably.
        while (not self._spatial_query_object.contains(new_loc.point)
               or self.intersect(start_point=nudge_loc.point,
                                 end_point=new_loc.point)) and count < 6:
            changing_distance /= 2
            new_point = AsaGreatCircle.great_circle(distance=changing_distance,
                                                    azimuth=random_azimuth,
                                                    start_point=hit_point)
            new_loc = Location4D(latitude=new_point['latitude'],
                                 longitude=new_point['longitude'],
                                 depth=start_point.depth)
            count += 1

        # We tried 6 times and the particle was still on shore; return the point the particle started from.
        # No randomization.
        if count == 6:
            logger.warn(
                "LOOK: Could not react particle with shoreline.  Assuming particle did not move from original location"
            )
            return start_point

        #logger.info("Reaction time: %f" % (time.clock() - st))
        return new_loc
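Because the name is mangled, __reverse() can only be called from inside its own class.  A hypothetical wrapper in that class, showing how the keyword arguments fit together (how hit_point and reverse_azimuth are derived from the shoreline intersection is not part of this example, so they are simply passed through):

    # Hypothetical caller inside the same class; argument handling is assumed.
    def react(self, start_point, end_point, hit_point=None, reverse_azimuth=None, **kwargs):
        if hit_point is None or reverse_azimuth is None:
            return end_point          # no shoreline crossing to react to

        return self.__reverse(start_point=start_point,
                              hit_point=hit_point,
                              reverse_azimuth=reverse_azimuth,
                              reverse_distance=kwargs.get('reverse_distance', 100))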