def create_coverage_dataset(self, name, wms_url, netcdf_url, low_res_url,
                            data_range_from, data_range_to, is_categorical):
    """Creates a coverage dataset in the EcoMaps DB

        @param name: Display name of the dataset
        @param wms_url: Endpoint for the mapping server
        @param netcdf_url: URL of the OpenDAP endpoint for this dataset
        @param low_res_url: URL for accessing the NetCDF file over the HTTP protocol
        @param data_range_from: Low end of the data range
        @param data_range_to: High end of the data range
        @param is_categorical: Set to True if the data is categorical (not continuous)
    """
    with self.transaction_scope() as session:

        dataset_type = session.query(DatasetType) \
                              .filter(DatasetType.type == 'Coverage') \
                              .one()

        dataset = Dataset()
        dataset.name = name
        dataset.dataset_type = dataset_type
        dataset.netcdf_url = netcdf_url
        dataset.wms_url = wms_url
        dataset.low_res_url = low_res_url
        dataset.data_range_from = data_range_from
        dataset.data_range_to = data_range_to
        dataset.is_categorical = is_categorical

        session.add(dataset)
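A minimal usage sketch, assuming an instance named `service` exposes this method; the dataset name, URLs and range values below are illustrative placeholders, not real endpoints:

    # Hypothetical call; every value here is a placeholder.
    service.create_coverage_dataset(
        name='CHESS Rainfall 1971-01',
        wms_url='http://example.org/thredds/wms/chess.nc?service=WMS&version=1.3.0&request=GetCapabilities',
        netcdf_url='http://example.org/thredds/dodsC/chess.nc',
        low_res_url='http://example.org/thredds/fileServer/chess.nc',
        data_range_from=0.0,
        data_range_to=250.0,
        is_categorical=False)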
def test_analysis_run(self):
    handler = logging.StreamHandler(sys.stderr)
    handler.setLevel(logging.DEBUG)
    log.setLevel(logging.DEBUG)
    try:
        #log.basicConfig(stream=sys.stderr)
        log.addHandler(handler)

        with working_directory(EcomapsAnalysisWorkingDirectory(),
                               os.path.join(os.path.dirname(__file__), '../code_root')) as dir:

            # CEH Chess Data
            coverage_ds = Dataset()
            #coverage_ds.name = 'CHESS 1971 01'
            #coverage_ds.netcdf_url = 'http://*****:*****@test.com')

            analysis.run(point_url='http://thredds-prod.nerc-lancaster.ac.uk/thredds/dodsC/ECOMAPSDetail/ECOMAPSInputLOI01.nc',
                         coverage_dict=coverage_dict,
                         progress_fn=progress)
    finally:
        log.removeHandler(handler)

    # Remove me to test analysis code
    return
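The setup between the commented-out dataset lines and the `analysis.run(...)` call is redacted in the source, so `coverage_dict`, `progress` and `analysis` arrive unbound. A hypothetical reconstruction, inferred from how the runner's `run` method below builds the same arguments, might look like this (every name, column and value is an assumption):

    # Hypothetical reconstruction of the redacted test setup; the column
    # name and time index are placeholders.
    coverage_dict = {
        coverage_ds: [('LandCover', 0)]   # {Dataset: [(column_name, time_index), ...]}
    }

    def progress(message, complete):
        # Minimal progress callback; the runner calls it with a status
        # string and a completion flag, e.g. ('Complete', True).
        log.debug("%s (complete=%s)" % (message, complete))

    analysis = EcomapsAnalysis(dir, 'Test User', 'test.user@test.com')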
def create_point_dataset(self, name, wms_url, netcdf_url):
    """Creates a point dataset in the EcoMaps DB

        @param name: Display name of the dataset
        @param wms_url: Endpoint for the mapping server
        @param netcdf_url: URL of the OpenDAP endpoint for this dataset
    """
    with self.transaction_scope() as session:

        dataset_type = session.query(DatasetType) \
                              .filter(DatasetType.type == 'Point') \
                              .one()

        dataset = Dataset()
        dataset.name = name
        dataset.dataset_type = dataset_type
        dataset.netcdf_url = netcdf_url
        dataset.wms_url = wms_url
        dataset.low_res_url = None

        session.add(dataset)
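The point variant is exercised the same way; note that it stores no low-res URL or data range. As before, `service` and all values are placeholders:

    # Hypothetical call with illustrative values.
    service.create_point_dataset(
        name='ECOMAPS Input LOI01',
        wms_url='http://example.org/thredds/wms/loi01.nc?service=WMS&version=1.3.0&request=GetCapabilities',
        netcdf_url='http://example.org/thredds/dodsC/loi01.nc')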
def registerThreddsDatasets(url, types, session):
    """Scan over the given url for thredds datasets. Add to the session"""
    xml = parse(urllib2.urlopen(url))

    for dataset in xml.getElementsByTagName("dataset"):
        if dataset.hasAttribute("urlPath"):
            # Here we should look up the service name (contained in this
            # element) and find out the service's base.
            ds = Dataset()
            ds.dataset_type = types["GRID"]  # Set to GRID type by default

            # See if a dataType has been defined for this dataset.
            # If so, look it up.
            dataTypes = dataset.getElementsByTagName("dataType")
            if dataTypes.length == 1:
                ds.dataset_type = types[dataTypes[0].firstChild.nodeValue]

            path = dataset.attributes["urlPath"].value
            ds.name = dataset.attributes["name"].value
            ds.wms_url = urljoin(url, "/thredds/wms/" + path +
                                 "?service=WMS&version=1.3.0&request=GetCapabilities")
            ds.netcdf_url = urljoin(url, "/thredds/dodsC/" + path)

            session.add(ds)  # Register the dataset to the session

    # Group sibling catalogRefs together. If any of these have an aggregation
    # we will scan them, otherwise just scan all of the catalogRefs.
    catalogRefs = xml.getElementsByTagName("catalogRef")
    for key, group in groupby(catalogRefs, lambda e: e.parentNode):
        groupList = list(group)
        aggregations = filter(lambda x: x.attributes["xlink:title"].value.lower().endswith("aggregation"),
                              groupList)

        # Were there any aggregations?
        scan = aggregations if len(aggregations) > 0 else groupList

        for catRef in scan:
            # Check if the current catRef node has any sibling datasets which
            # are aggregations. If it does, ignore this catRef.
            if not hasSiblingAggregationDatasets(catRef):
                path = urljoin(url, catRef.attributes["xlink:href"].value)
                registerThreddsDatasets(path, types, session)
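A sketch of the imports this function relies on and a hypothetical invocation. `parse` is assumed to be xml.dom.minidom's; the catalog URL is a placeholder, and the `types` map is assumed to key DatasetType rows by strings that line up with the catalog's dataType values (e.g. "GRID"):

    import urllib2
    from itertools import groupby
    from urlparse import urljoin
    from xml.dom.minidom import parse

    # Hypothetical: build the type lookup, then scan a THREDDS catalog
    # recursively and register every dataset it finds on the session.
    types = dict((dt.type, dt) for dt in session.query(DatasetType).all())
    registerThreddsDatasets("http://example.org/thredds/catalog.xml", types, session)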
def run(self, analysis_obj):
    """Runs the analysis, updating the model object passed in
       with a result file URL and a PNG image (base64 encoded)

        Params:
            analysis_obj: Ecomaps analysis model to update
    """
    self._analysis_obj = analysis_obj

    with working_directory(EcomapsAnalysisWorkingDirectory(),
                           os.path.join(os.path.dirname(__file__), self._source_dir)) as dir:

        log.debug("Analysis for %s has started" % self._analysis_obj.name)

        #RUN
        analysis = EcomapsAnalysis(dir,
                                   analysis_obj.run_by_user.name,
                                   analysis_obj.run_by_user.email)

        file_name = "%s_%s.nc" % (self._analysis_obj.name.replace(' ', '-'),
                                  str(datetime.datetime.now().isoformat()).replace(':', '-'))

        # Make a sensible data structure to tie the columns chosen
        # for each dataset with any time slice information
        coverage_dict = {}
        for ds in analysis_obj.coverage_datasets:
            coverage_dict[ds.dataset] = [(c.column, c.time_index) for c in ds.columns]

        # Now we have enough information to kick the analysis off
        output_file_loc, map_image_file_loc, \
            fit_image_file_loc = analysis.run(analysis_obj.point_dataset.netcdf_url,
                                              coverage_dict,
                                              self._update_progress)

        # Write the result images to the analysis object
        with open(map_image_file_loc, "rb") as img:
            self._analysis_obj.result_image = base64.b64encode(img.read())

        with open(fit_image_file_loc, "rb") as img:
            self._analysis_obj.fit_image = base64.b64encode(img.read())

        # Grab the "convenience" values from the dataset, which we'll store
        # against the analysis - saves looking in the netCDF each time
        try:
            netCdf = NetCDFDataset(output_file_loc, 'r', format='NETCDF4')
            self._analysis_obj.aic = str(netCdf.AIC)
            self._analysis_obj.model_formula = netCdf.model_formula
        except:
            log.warning('Failed to get netCDF attributes at the end of %s'
                        % self._analysis_obj.name)

        # Copy the result file to the ecomaps THREDDS server.
        # Set the file name to the name of the analysis + a bit of uniqueness
        shutil.copyfile(output_file_loc,
                        os.path.join(self._netcdf_file_store, file_name))

        # Generate a WMS URL for the output file...
        wms_url = self._thredds_wms_format % file_name

        # Create a result dataset
        result_ds = Dataset()
        result_ds.name = self._analysis_obj.name
        result_ds.wms_url = wms_url

        # 3 = result dataset
        result_ds.dataset_type_id = 3
        result_ds.netcdf_url = self._open_ndap_format % file_name
        result_ds.viewable_by_user_id = analysis_obj.run_by_user.id

        # Tidy up the analysis object
        self._save_analysis(result_ds)
        self._update_progress('Complete', True)
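For context, the runner substitutes the generated file name into two `%`-format strings to publish the result through THREDDS. A minimal sketch of that substitution, with illustrative values that are assumptions rather than the project's real configuration:

    # Illustrative configuration values (assumptions):
    _thredds_wms_format = 'http://example.org/thredds/wms/ecomaps/%s?service=WMS&version=1.3.0&request=GetCapabilities'
    _open_ndap_format = 'http://example.org/thredds/dodsC/ecomaps/%s'

    # file_name = "<analysis-name>_<ISO timestamp with ':' replaced>.nc"
    file_name = 'My-Analysis_2014-01-01T12-00-00.nc'
    print _thredds_wms_format % file_name   # WMS GetCapabilities URL for the result
    print _open_ndap_format % file_name     # OpenDAP URL stored on the result dataset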