def overlay_point_data(self, point_url):
    """Overlay the point dataset onto the land cover map so it can be
    viewed over WMS.

    The coverage map's 'LandCover' variable is repurposed: every cell is
    reset to -1 (rendered invisible), then any cell whose (x, y) matches
    a point in the dataset is set to the variable's maximum (50) so the
    points render at full intensity.

    @param point_url: OpenDAP URL pointing to the point dataset
    @returns: The WMS URL for the duplicate point dataset
    """
    # Work inside a temporary directory so the shipped coverage map
    # template is never modified in place
    with working_directory(DatasetConversionWorkingDirectory(),
                           os.path.join(os.path.dirname(__file__),
                                        "grid_data")) as working_dir:

        points = coverage = None
        try:
            # Open the points data over OpenDAP
            points = open_url(point_url)

            # Coverage data needs to be opened from file,
            # as we'll be writing to it
            coverage = Dataset(working_dir.coverage_map, 'a', format='NETCDF4')

            # First, get the x and y's out for the points
            north_list = points['northing'][:]
            east_list = points['easting'][:]

            coverage_north = coverage.variables['y'][:]
            coverage_east = coverage.variables['x'][:]

            # Rename the variable, and set the max value
            # to the default max (50)
            coverage.variables['LandCover'].long_name = "Point Data"
            coverage.variables['LandCover'].valid_max = 50

            # Reset the whole grid to below 0 so it is invisible.
            # Vectorised assignment replaces the original per-cell
            # numpy.ndenumerate Python loop, which did the same thing
            # orders of magnitude more slowly.
            value = coverage.variables['LandCover'][:, :]
            value[:] = -1

            # Now look for the points in the coverage map, and set the
            # value to the maximum when found
            for x, y in zip(east_list, north_list):
                index_x = numpy.where(coverage_east == find_closest(coverage_east, x))[0][0]
                index_y = numpy.where(coverage_north == find_closest(coverage_north, y))[0][0]
                value[index_y, index_x] = 50

            coverage.variables['LandCover'][:, :] = value

            # Copy the modified coverage map to the map server location
            # defined in the config; the timestamp (colons stripped for
            # filesystem safety) keeps the name unique
            file_name = "%s_%s.nc" % ("points",
                                      str(datetime.datetime.now().isoformat()).replace(':', '-'))
            file_destination = os.path.join(self._config['netcdf_file_location'],
                                            file_name)
            shutil.copyfile(working_dir.coverage_map, file_destination)

            wms_url = self._config['thredds_wms_format'] % file_name
            return wms_url
        finally:
            # Explicit None checks: dataset objects may define their own
            # (possibly falsy) truth value, so don't rely on truthiness
            if points is not None:
                del points
            if coverage is not None:
                coverage.close()
def run(self, analysis_obj):
    """Runs the analysis, updating the model object passed in with
    a result file URL and a PNG image (base64 encoded)

    Params:
        analysis_obj: Ecomaps analysis model to update
    """
    self._analysis_obj = analysis_obj

    with working_directory(EcomapsAnalysisWorkingDirectory(),
                           os.path.join(os.path.dirname(__file__),
                                        self._source_dir)) as dir:

        log.debug("Analysis for %s has started" % self._analysis_obj.name)

        # RUN
        analysis = EcomapsAnalysis(dir,
                                   analysis_obj.run_by_user.name,
                                   analysis_obj.run_by_user.email)

        # Analysis name + timestamp gives the result file a unique name
        # (colons stripped so the name is safe on all filesystems)
        file_name = "%s_%s.nc" % (self._analysis_obj.name.replace(' ', '-'),
                                  str(datetime.datetime.now().isoformat()).replace(':', '-'))

        # Make a sensible data structure to tie the columns chosen
        # for each dataset with any time slice information
        coverage_dict = {}
        for ds in analysis_obj.coverage_datasets:
            coverage_dict[ds.dataset] = [(c.column, c.time_index) for c in ds.columns]

        # Now we have enough information to kick the analysis off
        output_file_loc, map_image_file_loc, \
            fit_image_file_loc = analysis.run(analysis_obj.point_dataset.netcdf_url,
                                              coverage_dict,
                                              self._update_progress)

        # Store both result images base64-encoded against the model
        with open(map_image_file_loc, "rb") as img:
            self._analysis_obj.result_image = base64.b64encode(img.read())

        with open(fit_image_file_loc, "rb") as img:
            self._analysis_obj.fit_image = base64.b64encode(img.read())

        # Grab the "convenience" values from the dataset, which
        # we'll store against the analysis, saves looking in the
        # netCDF each time
        netCdf = None
        try:
            netCdf = NetCDFDataset(output_file_loc, 'r', format='NETCDF4')
            self._analysis_obj.aic = str(netCdf.AIC)
            self._analysis_obj.model_formula = netCdf.model_formula
        except Exception:
            # Best-effort: a result without these attributes is still
            # usable, so log and carry on. except Exception (not bare
            # except) so KeyboardInterrupt/SystemExit aren't swallowed.
            log.warning('Failed to get netCDF attributes at the end of %s'
                        % self._analysis_obj.name)
        finally:
            # Always release the file handle - the original leaked it
            if netCdf is not None:
                netCdf.close()

        # Copy the result file to the ecomaps THREDDS server
        # Set the file name to the name of the analysis + a bit of uniqueness
        shutil.copyfile(output_file_loc,
                        os.path.join(self._netcdf_file_store, file_name))

        # Generate a WMS URL for the output file...
        wms_url = self._thredds_wms_format % file_name

        # Create a result dataset
        result_ds = Dataset()
        result_ds.name = self._analysis_obj.name
        result_ds.wms_url = wms_url

        # 3 = result dataset
        result_ds.dataset_type_id = 3
        result_ds.netcdf_url = self._open_ndap_format % file_name
        result_ds.viewable_by_user_id = analysis_obj.run_by_user.id

        # Tidy up the analysis object and persist the result
        self._save_analysis(result_ds)
        self._update_progress('Complete', True)