def particle_runner(part, model):

    from paegan.logger import logger
    logger.setLevel(logging.PROGRESS)

    from paegan.logger.redis_handler import RedisHandler
    rhandler = RedisHandler(model.redis_log_channel, model.redis_url)
    rhandler.setLevel(logging.PROGRESS)
    logger.addHandler(rhandler)

    # Create the connection before the try so the except clause can always
    # publish a FAILED status.
    redis_connection = redis.from_url(model.redis_url)

    try:
        forcer = BaseForcer(model.hydrodataset,
                            particle=part,
                            common_variables=model.common_variables,
                            times=model.times,
                            start_time=model.start,
                            models=model._models,
                            release_location_centroid=model.reference_location.point,
                            usebathy=model._use_bathymetry,
                            useshore=model._use_shoreline,
                            usesurface=model._use_seasurface,
                            reverse_distance=model.reverse_distance,
                            bathy_path=model.bathy_path,
                            shoreline_path=model.shoreline_path,
                            shoreline_feature=model.shoreline_feature,
                            time_method=model.time_method,
                            redis_url=model.redis_url,
                            redis_results_channel=model.redis_results_channel,
                            shoreline_index_buffer=model.shoreline_index_buffer)
        forcer.run()
    except Exception:
        # logger.exception appends the traceback itself; no need to format it manually.
        logger.exception("Particle %s FAILED to complete" % part.uid)
        redis_connection.publish(model.redis_results_channel, json.dumps({"status": "FAILED", "uid": part.uid}))
    else:
        redis_connection.publish(model.redis_results_channel, json.dumps({"status": "COMPLETED", "uid": part.uid}))
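# Usage sketch (assumption, not from this codebase): particle_runner is shaped
# like a process-pool target, so a caller might fan particles out as below.
# `model` must expose the attributes read above and be picklable so it can
# cross the process boundary; run_all_particles is a hypothetical helper.
def run_all_particles(model, particles):
    import multiprocessing
    pool = multiprocessing.Pool(processes=max(multiprocessing.cpu_count() - 1, 1))
    try:
        for part in particles:
            pool.apply_async(particle_runner, (part, model))
        pool.close()
        pool.join()
    except Exception:
        pool.terminate()
        raise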
def run(run_id):

    with app.app_context():
        job = get_current_job(connection=redis_connection)

        output_path = os.path.join(current_app.config['OUTPUT_PATH'], run_id)
        shutil.rmtree(output_path, ignore_errors=True)
        os.makedirs(output_path)

        cache_path = os.path.join(current_app.config['CACHE_PATH'], run_id)
        shutil.rmtree(cache_path, ignore_errors=True)
        os.makedirs(cache_path)

        f, log_file = tempfile.mkstemp(dir=cache_path, prefix=run_id, suffix=".log")
        os.close(f)
        os.chmod(log_file, 0o644)

        run = db.Run.find_one({'_id': ObjectId(run_id)})
        if run is None:
            return "Failed to locate run %s. May have been deleted while task was in the queue?" % run_id

        # Wait for PubSub listening to begin
        time.sleep(2)

        model = None
        cache_file = None

        try:
            hydropath = run['hydro_path']
            geometry = loads(run['geometry'])
            start_depth = run['release_depth']
            num_particles = run['particles']
            time_step = run['timestep']
            num_steps = int(math.ceil((run['duration'] * 24 * 60 * 60) / time_step))
            start_time = run['start'].replace(tzinfo=pytz.utc)
            shoreline_path = run['shoreline_path'] or app.config.get("SHORE_PATH")
            shoreline_feat = run['shoreline_feature']

            # Setup Models
            models = []
            if run['cached_behavior'] is not None and run['cached_behavior'].get('results', None) is not None:
                behavior_data = run['cached_behavior']['results'][0]
                l = LarvaBehavior(data=behavior_data)
                models.append(l)
            models.append(Transport(horizDisp=run['horiz_dispersion'], vertDisp=run['vert_dispersion']))

            output_formats = [
                ex.H5Trackline,
                ex.H5TracklineWithPoints,
                ex.H5ParticleTracklines,
                ex.H5ParticleMultiPoint,
                ex.H5GDALShapefile
            ]

            def save_progress(stop, progress_deque, logger):
                while True:
                    if stop():
                        break
                    try:
                        record = progress_deque.pop()
                        if record == StopIteration:
                            break

                        job.meta["updated"] = record[0]
                        if record is not None and record[1] >= 0:
                            job.meta["progress"] = record[1]
                        if isinstance(record[2], unicode) or isinstance(record[2], str):
                            job.meta["message"] = record[2]

                        job.save()
                    except IndexError:
                        pass
                    except Exception:
                        raise

                    # Relax
                    time.sleep(0.01)

            # Set up Logger
            from paegan.logger.progress_handler import ProgressHandler
            from paegan.logger.multi_process_logging import MultiProcessingLogHandler
            from paegan.logger import logger

            # Logging handler
            queue = multiprocessing.Queue(-1)
            logger.setLevel(logging.PROGRESS)
            if app.config.get("DEBUG") is True:
                mphandler = MultiProcessingLogHandler(log_file, queue, stream=True)
            else:
                mphandler = MultiProcessingLogHandler(log_file, queue)
            mphandler.setLevel(logging.PROGRESS)
            formatter = logging.Formatter('[%(asctime)s] - %(levelname)s - %(name)s - %(processName)s - %(message)s')
            mphandler.setFormatter(formatter)
            logger.addHandler(mphandler)

            # Progress handler
            progress_deque = collections.deque(maxlen=1)
            progress_handler = ProgressHandler(progress_deque)
            progress_handler.setLevel(logging.PROGRESS)
            logger.addHandler(progress_handler)

            stop_log_listener = False
            pl = threading.Thread(name="ProgressUpdater", target=save_progress, args=(lambda: stop_log_listener, progress_deque, logger,))
            pl.start()

            model = CachingModelController(
                geometry=geometry,
                depth=start_depth,
                start=start_time,
                step=time_step,
                nstep=num_steps,
                npart=num_particles,
                models=models,
                use_bathymetry=True,
                bathy_path=current_app.config['BATHY_PATH'],
                use_shoreline=True,
                time_method=run['time_method'],
                time_chunk=run['time_chunk'],
                horiz_chunk=run['horiz_chunk'],
                shoreline_path=shoreline_path,
                shoreline_feature=shoreline_feat,
                shoreline_index_buffer=0.05)

            cache_file = os.path.join(cache_path, run_id + ".nc.cache")
            model.setup_run(hydropath, cache_path=cache_file, remove_cache=False)

            run.started = datetime.utcnow()

            model.run(output_formats=output_formats, output_path=output_path)

        except Exception as e:
            # Stop progress log handler and wait for it to exit
            stop_log_listener = True
            pl.join()
            app.logger.exception("Failed to run model")
            job.meta["message"] = e.message
            job.meta["outcome"] = "failed"
        else:
            # Stop progress log handler and wait for it to exit
            stop_log_listener = True
            pl.join()
            job.meta["message"] = "Complete"
            job.meta["outcome"] = "success"
            job.meta["progress"] = 100
        finally:
            # Close and remove the handlers so we can use the log file without a file lock
            for hand in list(logger.handlers):
                logger.removeHandler(hand)
                hand.flush()
                hand.close()
            queue.close()
            queue.join_thread()

            time.sleep(1)

            # Move logfile to output directory
            shutil.move(log_file, os.path.join(output_path, 'model.log'))

            # Move cachefile to output directory if we made one
            if cache_file is not None and os.path.exists(cache_file):
                shutil.move(cache_file, os.path.join(output_path, 'hydro_cache.nc'))

            output_files = []
            for filename in os.listdir(output_path):
                outfile = os.path.join(output_path, filename)
                output_files.append(outfile)

            result_files = []

            # Handle results and cleanup
            if current_app.config['USE_S3'] is True:
                base_access_url = urljoin("http://%s.s3.amazonaws.com/output/" % current_app.config['S3_BUCKET'], run_id)
                # Upload results to S3 and remove the local copies
                conn = S3Connection()
                bucket = conn.get_bucket(current_app.config['S3_BUCKET'])

                for outfile in output_files:
                    # Upload the outfile with the same name as the run
                    _, ext = os.path.splitext(outfile)
                    new_filename = slugify(unicode(run['name'])) + ext

                    k = Key(bucket)
                    k.key = "output/%s/%s" % (run_id, new_filename)
                    k.set_contents_from_filename(outfile)
                    k.set_acl('public-read')
                    result_files.append(base_access_url + "/" + new_filename)
                    os.remove(outfile)

                shutil.rmtree(output_path, ignore_errors=True)
            else:
                result_files = output_files

            job.meta["updated"] = datetime.utcnow().replace(tzinfo=pytz.utc)
            job.save()

            # Set output fields
            run.output = result_files
            run.ended = datetime.utcnow()
            run.compute()
            run.save()
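# Hypothetical client-side counterpart (sketch): the save_progress thread above
# writes "updated", "progress" and "message" into job.meta, so a status
# endpoint could read them back like this. rq's Job.fetch/refresh are real
# APIs; fetch_job_progress itself is illustrative only.
def fetch_job_progress(job_id, redis_conn):
    from rq.job import Job
    job = Job.fetch(job_id, connection=redis_conn)
    job.refresh()  # re-read the meta dict the worker keeps saving
    meta = job.meta or {}
    return {"progress": meta.get("progress"),
            "message": meta.get("message"),
            "updated": meta.get("updated")}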
def setup_run(self, **kwargs):

    logger.setLevel(logging.PROGRESS)

    self.redis_url = None
    self.redis_log_channel = None
    self.redis_results_channel = None
    if "redis" in kwargs.get("output_formats", []):
        from paegan.logger.redis_handler import RedisHandler
        self.redis_url = kwargs.get("redis_url")
        self.redis_log_channel = kwargs.get("redis_log_channel")
        self.redis_results_channel = kwargs.get("redis_results_channel")
        rhandler = RedisHandler(self.redis_log_channel, self.redis_url)
        rhandler.setLevel(logging.PROGRESS)
        logger.addHandler(rhandler)

    # Relax.
    time.sleep(0.5)

    # Add ModelController description to logfile
    logger.info(unicode(self))

    # Add the model descriptions to logfile
    for m in self._models:
        logger.info(unicode(m))

    # Calculate the model timesteps
    # We need times = len(self._nstep) + 1 since data is stored one timestep
    # after a particle is forced with the final timestep's data.
    self.times = range(0, (self._step * self._nstep) + 1, self._step)
    # Calculate a datetime object for each model timestep
    # This method is duplicated in CachingDataController and CachingForcer
    # using the 'times' variable above. Will be useful in those other
    # locations for particles released at different times
    # i.e. released over a few days
    self.modelTimestep, self.datetimes = AsaTransport.get_time_objects_from_model_timesteps(self.times, start=self.start)

    logger.progress((1, "Setting up particle start locations"))
    point_locations = []
    if isinstance(self.geometry, Point):
        point_locations = [self.reference_location] * self._npart
    elif isinstance(self.geometry, Polygon) or isinstance(self.geometry, MultiPolygon):
        point_locations = [Location4D(latitude=loc.y, longitude=loc.x, depth=self._depth, time=self.start)
                           for loc in AsaTransport.fill_polygon_with_points(goal=self._npart, polygon=self.geometry)]

    # Initialize the particles
    logger.progress((2, "Initializing particles"))
    for x in xrange(0, self._npart):
        p = LarvaParticle(id=x)
        p.location = point_locations[x]
        # We don't need to fill the location gaps here for environment variables
        # because the first data collected actually relates to this original
        # position.
        # We do need to fill in fields such as settled, halted, etc.
        p.fill_status_gap()
        # Set the initial note
        p.note = p.outputstring()
        p.notes.append(p.note)
        self.particles.append(p)

    if kwargs.get("manager", True):
        # Get the number of cores (may take some tuning) and create that
        # many workers, then pass particles into the queue for the workers
        self.mgr = multiprocessing.Manager()

        # This tracks if the system is 'alive'. Most looping whiles will check this
        # and break out if it is False. This is True until something goes very wrong.
        self.active = self.mgr.Value('bool', True)

        # Each particle is a task, plus the CachingDataController
        self.number_of_tasks = self.get_number_of_tasks()

        # Either spin up the number of cores, or the number of tasks
        self.nproc = min(multiprocessing.cpu_count() - 1, self.number_of_tasks)

        # Number of tasks that we need to run. This is decremented every time something exits.
        self.n_run = self.mgr.Value('int', self.number_of_tasks)

        # The lock that controls access to the 'n_run' variable
        self.nproc_lock = self.mgr.Lock()

        # Create the task queue for all of the particles and the CachingDataController
        self.tasks = multiprocessing.JoinableQueue(self.number_of_tasks)

        # Create the result queue for all of the particles and the CachingDataController
        self.results = self.mgr.Queue(self.number_of_tasks)

    logger.progress((3, "Initializing and caching hydro model's grid"))
    try:
        ds = CommonDataset.open(self.hydrodataset)
    except Exception:
        logger.exception("Failed to access dataset %s" % self.hydrodataset)
        raise BaseDataControllerError("Inaccessible Dataset: %s" % self.hydrodataset)

    # Query the dataset for common variable names
    # and the time variable.
    logger.debug("Retrieving variable information from dataset")
    self.common_variables = self.get_common_variables_from_dataset(ds)

    self.timevar = None
    try:
        assert self.common_variables.get("u") in ds._current_variables
        assert self.common_variables.get("v") in ds._current_variables
        assert self.common_variables.get("x") in ds._current_variables
        assert self.common_variables.get("y") in ds._current_variables
        self.timevar = ds.gettimevar(self.common_variables.get("u"))
    except AssertionError:
        logger.exception("Could not locate variables needed to run model: %s" % unicode(self.common_variables))
        raise BaseDataControllerError("A required data variable was not found in %s" % self.hydrodataset)

    model_start = self.timevar.get_dates()[0]
    model_end = self.timevar.get_dates()[-1]

    try:
        assert self.start > model_start
        assert self.start < model_end
    except AssertionError:
        raise BaseDataControllerError("Start time for model (%s) is not available in source dataset (%s/%s)" % (self.datetimes[0], model_start, model_end))

    try:
        assert self.datetimes[-1] > model_start
        assert self.datetimes[-1] < model_end
    except AssertionError:
        raise BaseDataControllerError("End time for model (%s) is not available in source dataset (%s/%s)" % (self.datetimes[-1], model_start, model_end))

    ds.closenc()
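# Sketch (assumption) of the worker loop implied by the tasks/results queues
# built above. The real consumers are paegan's forcer and data-controller
# classes; force_particle is a hypothetical stand-in so the pattern runs on
# its own. StopIteration is used as the shutdown sentinel, mirroring its use
# as a sentinel elsewhere in this code.
def force_particle(task):
    # Hypothetical stand-in for the real per-particle forcing work.
    return getattr(task, "uid", None)

def worker_loop(tasks, results, n_run, nproc_lock):
    try:
        while True:
            task = tasks.get()
            if task is StopIteration:  # sentinel: shut this worker down
                tasks.task_done()
                break
            try:
                results.put(force_particle(task))
            finally:
                tasks.task_done()
    finally:
        # 'n_run' is decremented under its lock every time something exits.
        with nproc_lock:
            n_run.value -= 1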
def setup_run(self, hydrodataset, **kwargs):

    self.hydrodataset = hydrodataset

    logger.setLevel(logging.PROGRESS)

    # Relax.
    time.sleep(0.5)

    # Add ModelController description to logfile
    logger.info(str(self))

    # Add the model descriptions to logfile
    for m in self._models:
        logger.info(str(m))

    # Calculate the model timesteps
    # We need times = len(self._nstep) + 1 since data is stored one timestep
    # after a particle is forced with the final timestep's data.
    self.times = list(range(0, (self._step * self._nstep) + 1, self._step))
    # Calculate a datetime object for each model timestep
    # This method is duplicated in CachingDataController and CachingForcer
    # using the 'times' variable above. Will be useful in those other
    # locations for particles released at different times
    # i.e. released over a few days
    self.modelTimestep, self.datetimes = AsaTransport.get_time_objects_from_model_timesteps(self.times, start=self.start)

    logger.progress((1, "Setting up particle start locations"))
    point_locations = []
    if isinstance(self.geometry, Point):
        point_locations = [self.reference_location] * self._npart
    elif isinstance(self.geometry, Polygon) or isinstance(self.geometry, MultiPolygon):
        point_locations = [Location4D(latitude=loc.y, longitude=loc.x, depth=self._depth, time=self.start)
                           for loc in AsaTransport.fill_polygon_with_points(goal=self._npart, polygon=self.geometry)]

    # Initialize the particles
    logger.progress((2, "Initializing particles"))
    for x in range(0, self._npart):
        p = LarvaParticle(id=x)
        p.location = point_locations[x]
        # We don't need to fill the location gaps here for environment variables
        # because the first data collected actually relates to this original
        # position.
        # We do need to fill in fields such as settled, halted, etc.
        p.fill_status_gap()
        # Set the initial note
        p.note = p.outputstring()
        p.notes.append(p.note)
        self.particles.append(p)

    logger.progress((3, "Initializing and caching hydro model's grid %s" % self.hydrodataset))
    try:
        ds = CommonDataset.open(self.hydrodataset)
        # Query the dataset for common variable names
        # and the time variable.
        logger.debug("Retrieving variable information from dataset")
        self.common_variables = self.get_common_variables_from_dataset(ds)
    except Exception:
        logger.exception("Failed to access dataset %s" % self.hydrodataset)
        raise BaseDataControllerError("Inaccessible Dataset: %s" % self.hydrodataset)

    self.timevar = None
    try:
        assert self.common_variables.get("u") in ds._current_variables
        assert self.common_variables.get("v") in ds._current_variables
        assert self.common_variables.get("x") in ds._current_variables
        assert self.common_variables.get("y") in ds._current_variables
        self.timevar = ds.gettimevar(self.common_variables.get("u"))
        model_start = self.timevar.get_dates()[0]
        model_end = self.timevar.get_dates()[-1]
    except AssertionError:
        logger.exception("Could not locate variables needed to run model: %s" % str(self.common_variables))
        raise BaseDataControllerError("A required data variable was not found in %s" % self.hydrodataset)
    finally:
        ds.closenc()

    try:
        assert self.start > model_start
        assert self.start < model_end
    except AssertionError:
        raise BaseDataControllerError("Start time for model (%s) is not available in source dataset (%s/%s)" % (self.datetimes[0], model_start, model_end))

    try:
        assert self.datetimes[-1] > model_start
        assert self.datetimes[-1] < model_end
    except AssertionError:
        raise BaseDataControllerError("End time for model (%s) is not available in source dataset (%s/%s)" % (self.datetimes[-1], model_start, model_end))
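# Compact equivalent (sketch) of the assertion block above, assuming
# common_variables maps the logical names "u", "v", "x", "y" to dataset
# variable names. missing_variables is a hypothetical helper, not paegan API;
# e.g. missing_variables(self.common_variables, ds._current_variables) -> [].
def missing_variables(common_variables, available_variables):
    needed = ("u", "v", "x", "y")
    return [name for name in needed if common_variables.get(name) not in available_variables]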
def run(run_id):

    # Sleep to give the Run object enough time to save
    time.sleep(10)

    with app.app_context():
        from paegan.logger import logger

        job = get_current_job()

        output_path = os.path.join(current_app.config['OUTPUT_PATH'], run_id)
        shutil.rmtree(output_path, ignore_errors=True)
        os.makedirs(output_path)

        cache_path = os.path.join(current_app.config['CACHE_PATH'], run_id)
        shutil.rmtree(cache_path, ignore_errors=True)
        os.makedirs(cache_path)

        temp_animation_path = os.path.join(current_app.config['OUTPUT_PATH'], "temp_images_" + run_id)
        shutil.rmtree(temp_animation_path, ignore_errors=True)
        os.makedirs(temp_animation_path)

        # Set up Logger
        queue = multiprocessing.Queue(-1)

        f, log_file = tempfile.mkstemp(dir=cache_path, prefix=run_id, suffix=".log")
        os.close(f)

        # Close any existing handlers
        for hand in logger.handlers:
            hand.close()
        # Remove any existing handlers
        logger.handlers = []

        logger.setLevel(logging.PROGRESS)
        handler = MultiProcessingLogHandler(log_file, queue)
        handler.setLevel(logging.PROGRESS)
        formatter = logging.Formatter('[%(asctime)s] - %(levelname)s - %(name)s - %(processName)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        # Progress stuff. Hokey!
        progress_deque = collections.deque(maxlen=1)
        progress_handler = ProgressHandler(progress_deque)
        progress_handler.setLevel(logging.PROGRESS)
        logger.addHandler(progress_handler)

        e = threading.Event()

        def save_progress():
            while e.wait(5) is not True:
                try:
                    record = progress_deque.pop()
                    if record == StopIteration:
                        break

                    job.meta["updated"] = record[0]
                    if record is not None and record[1] >= 0:
                        job.meta["progress"] = record[1]
                    if isinstance(record[2], unicode) or isinstance(record[2], str):
                        job.meta["message"] = record[2]

                    job.save()
                except IndexError:
                    pass
                except Exception:
                    raise
            return

        t = threading.Thread(name="ProgressUpdater", target=save_progress)
        t.daemon = True
        t.start()

        model = None
        run = None
        cache_file = None

        try:
            logger.progress((0, "Configuring model"))

            run = db.Run.find_one({'_id': ObjectId(run_id)})
            if run is None:
                return "Failed to locate run %s. May have been deleted while task was in the queue?" % run_id

            geometry = loads(run['geometry'])
            start_depth = run['release_depth']
            num_particles = run['particles']
            time_step = run['timestep']
            num_steps = int(math.ceil((run['duration'] * 24 * 60 * 60) / time_step))
            start_time = run['start'].replace(tzinfo=pytz.utc)
            shoreline_path = run['shoreline_path'] or app.config.get("SHORE_PATH")
            shoreline_feat = run['shoreline_feature']

            # Set up output directory/bucket for run
            output_formats = ['Shapefile', 'NetCDF', 'Trackline']

            # Setup Models
            models = []
            if run['cached_behavior'] is not None and run['cached_behavior'].get('results', None) is not None:
                behavior_data = run['cached_behavior']['results'][0]
                l = LarvaBehavior(data=behavior_data)
                models.append(l)
            models.append(Transport(horizDisp=run['horiz_dispersion'], vertDisp=run['vert_dispersion']))

            # Setup ModelController
            model = ModelController(geometry=geometry,
                                    depth=start_depth,
                                    start=start_time,
                                    step=time_step,
                                    nstep=num_steps,
                                    npart=num_particles,
                                    models=models,
                                    use_bathymetry=True,
                                    use_shoreline=True,
                                    time_chunk=run['time_chunk'],
                                    horiz_chunk=run['horiz_chunk'],
                                    time_method=run['time_method'],
                                    shoreline_path=shoreline_path,
                                    shoreline_feature=shoreline_feat,
                                    reverse_distance=1500)

            # Run the model
            cache_file = os.path.join(cache_path, run_id + ".nc.cache")
            bathy_file = current_app.config['BATHY_PATH']
            model.run(run['hydro_path'], output_path=output_path, bathy=bathy_file, output_formats=output_formats, cache=cache_file, remove_cache=False, caching=run['caching'])

            # Skip creating movie output_path
            """
            from paegan.viz.trajectory import CFTrajectory

            logger.info("Creating animation...")
            for filename in os.listdir(output_path):
                if os.path.splitext(filename)[1][1:] == "nc":
                    # Found netCDF file
                    netcdf_file = os.path.join(output_path, filename)
                    traj = CFTrajectory(netcdf_file)
                    success = traj.plot_animate(os.path.join(output_path, 'animation.avi'), temp_folder=temp_animation_path, bathy=app.config['BATHY_PATH'])
                    if not success:
                        logger.info("Could not create animation")
                    else:
                        logger.info("Animation saved")
            """

            job.meta["outcome"] = "success"
            job.save()
            return "Successfully ran %s" % run_id

        except Exception as exception:
            logger.warn("Run FAILED, cleaning up and uploading log.")
            logger.warn(exception.message)
            job.meta["outcome"] = "failed"
            job.save()
            raise

        finally:
            logger.progress((99, "Processing output files"))

            # Close the handlers so we can upload the log file without a file lock
            for hand in logger.handlers:
                hand.close()
            queue.put(StopIteration)

            # Break out of the progress loop
            e.set()
            t.join()

            # Move logfile to output directory
            shutil.move(log_file, os.path.join(output_path, 'model.log'))

            # Move cachefile to output directory if we made one
            if cache_file is not None and run is not None and run['caching']:
                shutil.move(cache_file, output_path)

            output_files = []
            for filename in os.listdir(output_path):
                outfile = os.path.join(output_path, filename)
                output_files.append(outfile)

            result_files = []
            base_access_url = current_app.config.get('NON_S3_OUTPUT_URL', None)

            # Handle results and cleanup
            if current_app.config['USE_S3'] is True:
                base_access_url = urljoin("http://%s.s3.amazonaws.com/output/" % current_app.config['S3_BUCKET'], run_id)
                # Upload results to S3 and remove the local copies
                conn = S3Connection()
                bucket = conn.get_bucket(current_app.config['S3_BUCKET'])

                for outfile in output_files:
                    # Don't upload the cache file
                    if cache_file is not None and os.path.basename(outfile) == os.path.basename(cache_file):
                        continue

                    # Upload the outfile with the same name as the run
                    _, ext = os.path.splitext(outfile)
                    new_filename = slugify(unicode(run['name'])) + ext

                    k = Key(bucket)
                    k.key = "output/%s/%s" % (run_id, new_filename)
                    k.set_contents_from_filename(outfile)
                    k.set_acl('public-read')
                    result_files.append(base_access_url + "/" + new_filename)
                    os.remove(outfile)

                shutil.rmtree(output_path, ignore_errors=True)
            else:
                for outfile in output_files:
                    result_files.append(urljoin(base_access_url, run_id) + "/" + os.path.basename(outfile))

            shutil.rmtree(temp_animation_path, ignore_errors=True)

            # Set output fields
            if run is not None:
                run.output = result_files
                run.ended = datetime.utcnow()
                run.compute()
                run.save()

            # Cleanup
            logger.removeHandler(handler)
            del formatter
            del handler
            del logger
            del model

            queue.close()

            job.meta["message"] = "Complete"
            job.save()