def wps():
    '''Returns a WPS instance preloaded from a cached CEDA capabilities document.'''
    # Initialize WPS client; skip_caps=True avoids a live network request.
    wps = WebProcessingService('http://example.org/wps', skip_caps=True)
    # Parse the cached GetCapabilities response. 'with' guarantees the fixture
    # file handle is closed (the original leaked it).
    with open(resource_file('wps_CEDACapabilities.xml'), 'rb') as f:
        xml = f.read()
    wps.getcapabilities(xml=xml)
    return wps
def run_wps(process_id, input, output):
    """Execute a WPS process and fetch its referenced output.

    Parameters
    ----------
    process_id : str
        Identifier of the WPS process to run.
    input, output : WPS execute input/output specifications
        Passed straight to ``WebProcessingService.execute``.
        (``input`` shadows the builtin, but the name is kept for
        backward compatibility with keyword callers.)

    Returns
    -------
    list ``[response_body, href_tokens]`` when the execute response contains
    an ``href`` reference; ``None`` otherwise.
    """
    url_wps = 'http://appsdev.hydroshare.org:8282/wps/WebProcessingService'
    # choose the first wps engine
    my_engine = WebProcessingService(url_wps, verbose=False, skip_caps=True)
    my_engine.getcapabilities()
    # DescribeProcess is still issued (the service may require it), but the
    # original's unused input-name list has been dropped as dead code.
    my_engine.describeprocess(process_id)
    # executing the process..
    execution = my_engine.execute(process_id, input, output)
    request = execution.request
    # set store executeresponse to false
    request = request.replace('storeExecuteResponse="true"', 'storeExecuteResponse="false"')
    # POST the tweaked request ourselves; close the response (original leaked it).
    wps_open = urllib2.urlopen(urllib2.Request(url_wps, request))
    try:
        wps_read = wps_open.read()
    finally:
        wps_open.close()
    if 'href' in wps_read:
        # Crude string scrape of the href between the opening tag text and the
        # closing output markup (kept as-is from the original).
        tag = 'href="'
        location = wps_read.find(tag)
        new = wps_read[location + len(tag):]
        tag2 = '"/>\n </wps:Output>\n </wps:ProcessOutputs>\n</wps:'
        location2 = new.find(tag2)
        final = new[0:location2]
        split = final.split()
        # Download the referenced result; again close the response handle.
        wps_open1 = urllib2.urlopen(urllib2.Request(split[0]))
        try:
            wps_read1 = wps_open1.read()
        finally:
            wps_open1.close()
        # return [final_output_url, final_data]
        return [wps_read1, split]
def test_wps_getcapabilities_52n():
    """GetCapabilities parsed from a cached 52North response document."""
    # Initialize WPS client; skip_caps avoids a live network call.
    wps = WebProcessingService(
        'http://geoprocessing.demo.52north.org:8080/52n-wps-webapp-3.3.1/WebProcessingService',
        skip_caps=True)
    # Execute fake invocation of GetCapabilities operation by parsing cached response
    # from 52North service; 'with' closes the fixture file (the original leaked it).
    with open(resource_file('wps_52nCapabilities.xml'), 'rb') as f:
        xml = f.read()
    wps.getcapabilities(xml=xml)
    # Check WPS description
    assert wps.identification.type == 'WPS'
    # Check available operations
    operations = [op.name for op in wps.operations]
    assert operations == ['GetCapabilities', 'DescribeProcess', 'Execute']
    # Check high level process descriptions
    processes = [(p.identifier, p.title) for p in wps.processes]
    assert processes == [
        ('org.n52.wps.server.algorithm.test.MultiReferenceInputAlgorithm', 'for testing multiple inputs by reference'),
        ('org.n52.wps.server.algorithm.test.EchoProcess', 'Echo process'),
        ('org.n52.wps.server.algorithm.test.MultiReferenceBinaryInputAlgorithm', 'for testing multiple binary inputs by reference'),  # noqa
        ('org.n52.wps.server.algorithm.test.LongRunningDummyTestClass', 'org.n52.wps.server.algorithm.test.LongRunningDummyTestClass'),  # noqa
        ('org.n52.wps.server.algorithm.JTSConvexHullAlgorithm', 'org.n52.wps.server.algorithm.JTSConvexHullAlgorithm'),
        ('org.n52.wps.server.algorithm.test.MultipleComplexInAndOutputsDummyTestClass', 'org.n52.wps.server.algorithm.test.MultipleComplexInAndOutputsDummyTestClass'),  # noqa
        ('org.n52.wps.server.algorithm.test.DummyTestClass', 'org.n52.wps.server.algorithm.test.DummyTestClass')]
def test_wps_getcapabilities_usgs():
    """GetCapabilities parsed from a cached USGS GDP response document."""
    # Initialize WPS client; skip_caps avoids a live network call.
    wps = WebProcessingService('http://cida.usgs.gov/gdp/process/WebProcessingService', skip_caps=True)
    # Execute fake invocation of GetCapabilities operation by parsing cached response
    # from USGS service; 'with' closes the fixture file (the original leaked it).
    with open(resource_file('wps_USGSCapabilities.xml'), 'rb') as f:
        xml = f.read()
    wps.getcapabilities(xml=xml)
    # Check WPS description
    assert wps.updateSequence is not None
    assert wps.identification.type == 'WPS'
    assert wps.identification.title == 'Geo Data Portal WPS Implementation'
    assert wps.identification.abstract == 'A Geo Data Portal Service based on the 52north implementation of WPS 1.0.0'
    # Check available operations
    operations = [op.name for op in wps.operations]
    assert operations == ['GetCapabilities', 'DescribeProcess', 'Execute']
    # Check high level process descriptions
    processes = [(p.identifier, p.title) for p in wps.processes]
    assert processes == [
        ('gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles', 'gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles'),  # noqa
        ('gov.usgs.cida.gdp.wps.algorithm.discovery.CalculateWCSCoverageInfo', 'gov.usgs.cida.gdp.wps.algorithm.discovery.CalculateWCSCoverageInfo'),  # noqa
        ('gov.usgs.cida.gdp.wps.algorithm.communication.EmailWhenFinishedAlgorithm', 'gov.usgs.cida.gdp.wps.algorithm.communication.EmailWhenFinishedAlgorithm'),  # noqa
        ('gov.usgs.cida.gdp.wps.algorithm.communication.GeoserverManagementAlgorithm', 'gov.usgs.cida.gdp.wps.algorithm.communication.GeoserverManagementAlgorithm'),  # noqa
        ('gov.usgs.cida.gdp.wps.algorithm.discovery.GetWcsCoverages', 'gov.usgs.cida.gdp.wps.algorithm.discovery.GetWcsCoverages'),  # noqa
        ('gov.usgs.cida.gdp.wps.algorithm.filemanagement.GetWatersGeom', 'gov.usgs.cida.gdp.wps.algorithm.filemanagement.GetWatersGeom'),  # noqa
        ('gov.usgs.cida.gdp.wps.algorithm.discovery.ListOpendapGrids', 'gov.usgs.cida.gdp.wps.algorithm.discovery.ListOpendapGrids'),  # noqa
        ('gov.usgs.cida.gdp.wps.algorithm.filemanagement.CreateNewShapefileDataStore', 'gov.usgs.cida.gdp.wps.algorithm.filemanagement.CreateNewShapefileDataStore'),  # noqa
        ('gov.usgs.cida.gdp.wps.algorithm.discovery.GetGridTimeRange', 'gov.usgs.cida.gdp.wps.algorithm.discovery.GetGridTimeRange'),  # noqa
    ]
def test_wps_getcapabilities_ceda():
    """GetCapabilities parsed from a cached CEDA response document."""
    # Initialize WPS client; skip_caps avoids a live network call.
    wps = WebProcessingService('http://ceda-wps2.badc.rl.ac.uk/wps', skip_caps=True)
    # Execute fake invocation of GetCapabilities operation by parsing cached response
    # from the CEDA service; 'with' closes the fixture file (the original leaked it).
    with open(resource_file('wps_CEDACapabilities.xml'), 'rb') as f:
        xml = f.read()
    wps.getcapabilities(xml=xml)
    # Check WPS description
    assert wps.identification.type == 'WPS'
    assert wps.identification.title == 'WPS Pylons Test Server'
    assert wps.identification.abstract is None
    # Check available operations
    operations = [op.name for op in wps.operations]
    assert operations == ['GetCapabilities', 'DescribeProcess', 'Execute']
    # Check high level process descriptions
    processes = [(p.identifier, p.title) for p in wps.processes]
    assert processes == [
        ('CDMSSubsetVariable', 'Writes a text file and returns an output.'),
        ('NCDumpIt', 'Calls ncdump on the input file path and writes it to an output file.'),
        ('TestDap', 'Writes a text file and returns an output.'),
        ('CDMSDescribeVariableDomain', 'Writes a text file and returns an output.'),
        ('CFCheck', 'Writes a text file and returns an output.'),
        ('DoubleIt', 'Doubles the input number and returns value'),
        ('SimplePlot', 'Creates a simple map plot.'),
        ('CDMSListDatasets', 'Writes a text file and returns an output.'),
        ('CDMSListVariables', 'Writes a text file and returns an output.'),
        ('WCSWrapper', 'Web Coverage Service Wrapper Process'),
        ('GetWeatherStations', 'Writes a text file with one weather station per line'),
        ('ListPPFileHeader', 'Writes a text file that contains a listing of pp-records in a file.'),
        ('TakeAges', 'A test process to last a long time.'),
        ('CMIP5FileFinder', 'Writes a test file of matched CMIP5 files.'),
        ('SubsetPPFile', 'Filters a PP-file to generate a new subset PP-file.'),
        ('ExtractUKStationData', 'ExtractUKStationData'),
        ('CDOWrapper1', 'Writes a text file and returns an output.'),
        ('MMDNCDiff', 'MMDNCDiff'),
        # NOTE(review): this title string was split across a mangled line break in
        # the original source; reconstructed with a single space — confirm fixture.
        ('PlotRotatedGrid', 'Creates a plot - to show we can plot a rotated grid.'),
        ('MMDAsync', 'Writes a text file and returns an output.'),
        ('MashMyDataMultiplier', 'Writes a text file and returns an output.'),
        ('Delegator', 'Writes a text file and returns an output.'),
        ('ExArchProc1', 'Writes a text file and returns an output.'),
        ('CDOShowInfo', 'Writes a text file and returns an output.'),
        ('PostTest', 'Writes a text file and returns an output.'),
        ('StatusTestProcess', 'An process to test status responses'),
        ('WaitForFileDeletionCached', 'An asynchronous job that waits for a file to be deleted'),
        ('WaitForAllFilesToBeDeleted', 'An asynchronous job that waits for a number of files to be deleted'),
        ('AsyncTest', 'Does an asynchronous test job run'),
        ('SyncTest1', 'Just creates a file.'),
        ('WaitForFileDeletion', 'An asynchronous job that waits for a file to be deleted'),
        ('ProcessTemplate', 'Writes a text file and returns an output.')]
def run(self):
    """Query the WPS service for its process identifiers and emit the result.

    Emits a Response with status 200 and the list of process identifiers on
    success, or status 500 on any failure.
    """
    responseToReturn = Response()
    try:
        wps = WebProcessingService(self.url)
        wps.getcapabilities()
        processes = [x.identifier for x in wps.processes]
        responseToReturn.status = 200
        responseToReturn.data = processes
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; any service/parse error maps to a 500.
        responseToReturn.status = 500
    self.statusChanged.emit(responseToReturn)
def run_wps(process_id, input, output):
    """Execute a WPS process synchronously and fetch its referenced output.

    The execute request is rewritten with storeExecuteResponse="false" and
    POSTed manually because the normal asynchronous monitorExecution path is
    not usable against this service.

    Parameters
    ----------
    process_id : str
        Identifier of the WPS process to run.
    input, output : WPS execute input/output specifications
        Passed straight to ``WebProcessingService.execute``.
        (``input`` shadows the builtin; name kept for keyword-caller compat.)

    Returns
    -------
    list ``[response_body, href_tokens]`` when the execute response contains
    an ``href`` reference; ``None`` otherwise.
    """
    url_wps = 'http://appsdev.hydroshare.org:8282/wps/WebProcessingService'
    # choose the first wps engine
    my_engine = WebProcessingService(url_wps, verbose=False, skip_caps=True)
    my_engine.getcapabilities()
    # DescribeProcess is still issued; the original's unused input-name list
    # and commented-out alternatives were removed as dead code.
    my_engine.describeprocess(process_id)
    # executing the process..
    execution = my_engine.execute(process_id, input, output)
    request = execution.request
    # set store executeresponse to false
    request = request.replace('storeExecuteResponse="true"', 'storeExecuteResponse="false"')
    # POST the tweaked request ourselves; close the response (original leaked it).
    wps_open = urllib2.urlopen(urllib2.Request(url_wps, request))
    try:
        wps_read = wps_open.read()
    finally:
        wps_open.close()
    if 'href' in wps_read:
        # Crude string scrape of the href between the opening tag text and the
        # closing output markup (kept as-is from the original).
        tag = 'href="'
        location = wps_read.find(tag)
        new = wps_read[location + len(tag):]
        tag2 = '"/>\n </wps:Output>\n </wps:ProcessOutputs>\n</wps:'
        location2 = new.find(tag2)
        final = new[0:location2]
        split = final.split()
        # Download the referenced result; again close the response handle.
        wps_open1 = urllib2.urlopen(urllib2.Request(split[0]))
        try:
            wps_read1 = wps_open1.read()
        finally:
            wps_open1.close()
        # return [final_output_url, final_data]
        return [wps_read1, split]
def find_wps_service_engines():
    """Probe the WPS endpoint at VFW_SERVER + '/wps' and register it as a WpsModel row."""
    try:
        wps_address = VFW_SERVER + '/wps'
        wps_service = WebProcessingService(wps_address, verbose=False, skip_caps=True)
        wps_service.getcapabilities()
        new_data = WpsModel(name=wps_service.identification.title, endpoint=wps_address)
        new_data.save()
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate;
        # a missing/unreachable service is reported but not fatal (best-effort probe).
        print('--- No WPS_Service at port 8094. ---')
def find_wps_service_engines():
    """Probe the WPS endpoint at VFW_SERVER + '/wps' and register it as a WpsModel row."""
    try:
        # wps_address = 'http://localhost:5000/wps'
        wps_address = VFW_SERVER + '/wps'
        wps_service = WebProcessingService(wps_address, verbose=False, skip_caps=True)
        wps_service.getcapabilities()
        new_data = WpsModel(name=wps_service.identification.title, endpoint=wps_address)
        new_data.save()
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate;
        # a missing/unreachable service is logged but not fatal (best-effort probe).
        wps_log.debug(
            '--- Exception in utilities.py, find_wps_service_engines. (Maybe no WPS_Service at port 8094.) ---'
        )
        print('--- No WPS_Service at port 8094. ---')
class pyGDPwebProcessing():
    """
    This class allows interactive calls to be made into the GDP.
    """
    # NOTE(review): the 'async' keyword argument below is a reserved word in
    # Python 3.7+, so this class only parses on older interpreters. Renaming it
    # would break keyword callers, so it is left untouched here.

    def __init__(self, wfs_url=WFS_URL):
        # WFS endpoint is caller-configurable; WPS endpoint and version are fixed.
        # if WFS_URL is None:
        #     from pygdp.namespaces import WFS_URL
        # wfsUrl = WFS_URL
        self.wfsUrl = wfs_url
        self.wpsUrl = WPS_URL
        self.version = '1.1.0'
        self.wps = WebProcessingService(self.wpsUrl)

    def WPSgetCapabilities(self, xml=None):
        """
        Returns a list of capabilities.
        """
        # Delegates to owslib; an xml document may be supplied to parse offline.
        self.wps.getcapabilities(xml)

    def WPSdescribeprocess(self, identifier, xml=None):
        """
        Returns a list describing a specific identifier/process.
        """
        self.wps.describeprocess(identifier, xml)

    # pyGDP Submit Feature
    def dodsReplace(self, dataSetURI, verbose=False):
        # verbose raises the shared channel handler to INFO before delegating.
        if verbose:
            ch.setLevel(logging.INFO)
        return _execute_request.dodsReplace(dataSetURI, verbose)

    def submitFeatureCoverageOPenDAP(self, geoType, dataSetURI, varID, startTime, endTime,
                                     attribute='the_geom', value=None, gmlIDs=None,
                                     verbose=False, coverage=True, outputfname=None,
                                     sleepSecs=10, async=False):
        # Thin pass-through to feature_coverage with this instance's WFS URL inserted.
        if verbose:
            ch.setLevel(logging.INFO)
        return feature_coverage.submitFeatureCoverageOPenDAP(geoType, dataSetURI, varID,
                                                             startTime, endTime, attribute,
                                                             value, gmlIDs, verbose, coverage,
                                                             self.wfsUrl, outputfname,
                                                             sleepSecs, async=async)
class RookWPS:
    """Thin convenience wrapper around an owslib WPS endpoint."""

    def __init__(self, url):
        # Capabilities are fetched lazily via getcapabilities(), not here.
        self.wps = WebProcessingService(url, verbose=False, skip_caps=True)

    def getcapabilities(self):
        """Fetch the service capabilities and return the underlying client."""
        self.wps.getcapabilities()
        return self.wps

    def describeprocess(self, identifier):
        """Return the DescribeProcess document for *identifier*."""
        return self.wps.describeprocess(identifier)

    def execute(self, identifier, inputs):
        """Run *identifier* with *inputs* and return the URLs parsed from its metalink output."""
        requested = [("output", True, None)]
        job = self.wps.execute(identifier, inputs, output=requested)
        monitorExecution(job)
        print(job.errors)
        assert job.isSucceded() is True
        assert len(job.processOutputs) > 0
        metalink_ref = job.processOutputs[0].reference
        metalink_xml = requests.get(metalink_ref).text
        return parse_metalink(metalink_xml)
def test_wps_getcapabilities_52n():
    """GetCapabilities parsed from a cached 52North response document."""
    # Initialize WPS client; skip_caps avoids a live network call.
    wps = WebProcessingService(
        'http://geoprocessing.demo.52north.org:8080/52n-wps-webapp-3.3.1/WebProcessingService',
        skip_caps=True)
    # Execute fake invocation of GetCapabilities operation by parsing cached response
    # from 52North service; 'with' closes the fixture file (the original leaked it).
    with open(resource_file('wps_52nCapabilities.xml'), 'rb') as f:
        xml = f.read()
    wps.getcapabilities(xml=xml)
    # Check WPS description
    assert wps.identification.type == 'WPS'
    # Check available operations
    operations = [op.name for op in wps.operations]
    assert operations == ['GetCapabilities', 'DescribeProcess', 'Execute']
    # Check high level process descriptions
    processes = [(p.identifier, p.title) for p in wps.processes]
    assert processes == [
        ('org.n52.wps.server.algorithm.test.MultiReferenceInputAlgorithm',
         'for testing multiple inputs by reference'),
        ('org.n52.wps.server.algorithm.test.EchoProcess', 'Echo process'),
        ('org.n52.wps.server.algorithm.test.MultiReferenceBinaryInputAlgorithm',
         'for testing multiple binary inputs by reference'),  # noqa
        ('org.n52.wps.server.algorithm.test.LongRunningDummyTestClass',
         'org.n52.wps.server.algorithm.test.LongRunningDummyTestClass'),  # noqa
        ('org.n52.wps.server.algorithm.JTSConvexHullAlgorithm',
         'org.n52.wps.server.algorithm.JTSConvexHullAlgorithm'),
        ('org.n52.wps.server.algorithm.test.MultipleComplexInAndOutputsDummyTestClass',
         'org.n52.wps.server.algorithm.test.MultipleComplexInAndOutputsDummyTestClass'),  # noqa
        ('org.n52.wps.server.algorithm.test.DummyTestClass',
         'org.n52.wps.server.algorithm.test.DummyTestClass')
    ]
class pyGDPwebProcessing(): """ This class allows interactive calls to be made into the GDP. """ def _init_(self, wfsUrl=WFS_URL, wpsUrl=WPS_URL, version='1.1.0'): self.wfsUrl = wfsUrl self.wpsUrl = wpsUrl self.version = version self.wps = WebProcessingService(wpsUrl) def WPSgetCapbilities(self, xml=None): """ Returns a list of capabilities. """ self.wps.getcapabilities(xml) def WPSdescribeprocess(self, identifier, xml=None): """ Returns a list describing a specific identifier/process. """ self.wps.describeprocess(identifier, xml) def _encodeZipFolder(self, filename): """ This function will encode a zipfile and return the filename. """ #check extension if not filename.endswith('.zip'): raise Exception('Wrong filetype.') #encode the file with open(filename, 'rb') as fin: bytesRead = fin.read() encode= base64.b64encode(bytesRead) #renames the file and saves it onto local drive filename = filename.split('.') filename = str(filename[0]) + '_copy.' + str(filename[-1]) fout = open(filename, "w") fout.write(encode) fout.close() return filename def shapeToZip(self,inShape, outZip=None, allFiles=True): """Packs a shapefile to ZIP format. arguments -inShape - input shape file -outZip - output ZIP file (optional) default: <inShapeName>.zip in same folder as inShape (If full path not specified, output is written to to same folder as inShape) -allFiles - Include all files? 
(optional) True (default) - all shape file components False - just .shp,.shx,.dbf,.prj,shp.xml files reference: Esri, Inc, 1998, Esri Shapefile Technical Description http://www.esri.com/library/whitepapers/pdfs/shapefile.pdf author: Curtis Price, [email protected]""" if not os.path.splitext(inShape)[1] == ".shp": raise Exception, "inShape must be a *.shp" if not os.path.exists(inShape): raise Exception, "%s not found" % inShape # get shapefile root name "path/file.shp" -> "file" # and shapefile path rootName = os.path.splitext(os.path.basename(inShape))[0] inShape = os.path.realpath(inShape) inDir = os.path.dirname(inShape) # output zip file path if outZip in [None,""]: # default output: shapefilepath/shapefilename.zip outDir = inDir outZip = os.path.join(outDir,rootName) + ".zip" else: outDir = os.path.dirname(outZip) if outDir.strip() in ["","."]: # if full path not specified, use input shapefile folder outDir = os.path.dirname(os.path.realpath(inShape)) else: # if output path does exist, raise an exception if not os.path.exists(outDir): raise Exception, "Output folder %s not found" % outDir outZip = os.path.join(outDir,outZip) # enforce .zip extension outZip = os.path.splitext(outZip)[0] + ".zip" if not os.access(outDir, os.W_OK): raise Exception, "Output directory %s not writeable" % outDir if os.path.exists(outZip): os.unlink(outZip) try: # open zipfile zf = zipfile.ZipFile(outZip, 'w', zipfile.ZIP_DEFLATED) # write shapefile parts to zipfile ShapeExt = ["shp","shx","dbf","prj","shp.xml"] if allFiles: ShapeExt += ["sbn","sbx","fbn","fbx", "ain","aih","isx","mxs","atx","cpg"] for f in ["%s.%s" % (os.path.join(inDir,rootName),ext) for ext in ShapeExt]: if os.path.exists(f): zf.write(f) ##print f # debug print return outZip except Exception, msg: raise Exception, \ "Could not write zipfile " + outZip + "\n" + str(msg) finally:
class WPSClient(object):
    """Returns a class where every public method is a WPS process available at
    the given url.

    Example:
        >>> emu = WPSClient(url='<server url>')
        >>> emu.hello('stranger')
        'Hello stranger'
    """

    def __init__(
        self,
        url,
        processes=None,
        converters=None,
        username=None,
        password=None,
        headers=None,
        auth=None,
        verify=True,
        cert=None,
        verbose=False,
        progress=False,
        version=WPS_DEFAULT_VERSION,
        caps_xml=None,
        desc_xml=None,
        language=None,
    ):
        """
        Args:
            url (str): Link to WPS provider. config (Config): an instance
            processes: Specify a subset of processes to bind. Defaults to all
                processes.
            converters (dict): Correspondence of {mimetype: class} to convert
                this mimetype to a python object.
            username (str): passed to :class:`owslib.wps.WebProcessingService`
            password (str): passed to :class:`owslib.wps.WebProcessingService`
            headers (str): passed to :class:`owslib.wps.WebProcessingService`
            auth (requests.auth.AuthBase): requests-style auth class to authenticate,
                see https://2.python-requests.org/en/master/user/authentication/
            verify (bool): passed to :class:`owslib.wps.WebProcessingService`
            cert (str): passed to :class:`owslib.wps.WebProcessingService`
            verbose (str): passed to :class:`owslib.wps.WebProcessingService`
            progress (bool): If True, enable interactive user mode.
            version (str): WPS version to use.
            language (str): passed to :class:`owslib.wps.WebProcessingService`
                ex: 'fr-CA', 'en_US'.
        """
        self._converters = converters
        self._interactive = progress
        # Interactive progress mode submits requests in ASYNC mode; otherwise SYNC.
        self._mode = ASYNC if progress else SYNC
        self._notebook = notebook.is_notebook()
        self._inputs = {}
        self._outputs = {}

        if not verify:
            import urllib3
            # Caller explicitly opted out of TLS verification; silence the warning spam.
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

        if headers is None:
            headers = {}

        if auth is not None:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = requests.auth.HTTPBasicAuth(*auth)

            # We only need some headers from the requests.auth.AuthBase implementation
            # We prepare a dummy request, call the auth object with it, and get its headers
            dummy_request = requests.Request("get", "http://localhost")
            r = auth(dummy_request.prepare())
            auth_headers = ["Authorization", "Proxy-Authorization", "Cookie"]
            headers.update({h: r.headers[h] for h in auth_headers if h in r.headers})

        self._wps = WebProcessingService(url,
                                         version=version,
                                         username=username,
                                         password=password,
                                         verbose=verbose,
                                         headers=headers,
                                         verify=verify,
                                         cert=cert,
                                         skip_caps=True,
                                         language=language)

        try:
            # caps_xml lets callers supply a cached capabilities document (offline use).
            self._wps.getcapabilities(xml=caps_xml)
        except ServiceException as e:
            if "AccessForbidden" in str(e):
                raise UnauthorizedException(
                    "You are not authorized to do a request of type: GetCapabilities"
                )
            raise

        self._processes = self._get_process_description(processes, xml=desc_xml)

        # Build the methods: one bound method per remote process identifier.
        for pid in self._processes:
            setattr(self, sanitize(pid),
                    types.MethodType(self._method_factory(pid), self))

        self.logger = logging.getLogger('WPSClient')
        if progress:
            self._setup_logging()

        # Replace the instance docstring with one generated from the service metadata.
        self.__doc__ = utils.build_wps_client_doc(self._wps, self._processes)

    @property
    def language(self):
        return self._wps.language

    @language.setter
    def language(self, value):
        self._wps.language = value

    @property
    def languages(self):
        return self._wps.languages

    def _get_process_description(self, processes=None, xml=None):
        """Return the description for each process.

        Sends the server a `describeProcess` request for each process.

        Parameters
        ----------
        processes: str, list, None
            A process name, a list of process names or None (for all processes).

        Returns
        -------
        OrderedDict
            A dictionary keyed by the process identifier of process descriptions.
        """
        all_wps_processes = [p.identifier for p in self._wps.processes]

        if processes is None:
            if owslib.__version__ > '0.17.0':
                # Get the description for all processes in one request.
                ps = self._wps.describeprocess('all', xml=xml)
                return OrderedDict((p.identifier, p) for p in ps)
            else:
                processes = all_wps_processes

        # Check for invalid process names, i.e. not matching the getCapabilities response.
        process_names, missing = utils.filter_case_insensitive(
            processes, all_wps_processes)
        if missing:
            message = "These process names were not found on the WPS server: {}"
            raise ValueError(message.format(", ".join(missing)))

        # Get the description for each process.
        ps = [self._wps.describeprocess(pid, xml=xml) for pid in process_names]

        return OrderedDict((p.identifier, p) for p in ps)

    def _setup_logging(self):
        # Route progress messages to stdout with timestamps.
        self.logger.setLevel(logging.INFO)
        import sys
        fh = logging.StreamHandler(sys.stdout)
        fh.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
        self.logger.addHandler(fh)

    def _method_factory(self, pid):
        """Create a custom function signature with docstring, instantiate it and
        pass it to a wrapper which will actually call the process.

        Parameters
        ----------
        pid: str
            Identifier of the WPS process.

        Returns
        -------
        func
            A Python function calling the remote process, complete with docstring and signature.
        """
        process = self._processes[pid]

        # Required inputs must precede optional ones in the generated signature.
        required_inputs_first = sorted(process.dataInputs, key=sort_inputs_key)

        input_names = []
        # defaults will be set to the function's __defaults__:
        # A tuple containing default argument values for those arguments that have defaults,
        # or None if no arguments have a default value.
        defaults = []
        for inpt in required_inputs_first:
            input_names.append(sanitize(inpt.identifier))
            if inpt.minOccurs == 0 or inpt.defaultValue is not None:
                default = inpt.defaultValue if inpt.dataType != "ComplexData" else None
                defaults.append(utils.from_owslib(default, inpt.dataType))
        defaults = tuple(defaults) if defaults else None

        # The generated method simply forwards its keyword arguments to _execute.
        body = dedent("""
            inputs = locals()
            inputs.pop('self')
            return self._execute('{pid}', **inputs)
        """).format(pid=pid)

        func_builder = FunctionBuilder(
            name=sanitize(pid),
            doc=utils.build_process_doc(process),
            args=["self"] + input_names,
            defaults=defaults,
            body=body,
            filename=__file__,
            module=self.__module__,
        )

        self._inputs[pid] = {}
        if hasattr(process, "dataInputs"):
            self._inputs[pid] = OrderedDict(
                (i.identifier, i) for i in process.dataInputs)

        self._outputs[pid] = {}
        if hasattr(process, "processOutputs"):
            self._outputs[pid] = OrderedDict(
                (o.identifier, o) for o in process.processOutputs)

        func = func_builder.get_func()

        return func

    def _build_inputs(self, pid, **kwargs):
        """Build the input sequence from the function arguments."""
        wps_inputs = []
        for name, input_param in list(self._inputs[pid].items()):
            arg = kwargs.get(sanitize(name))
            if arg is None:
                continue

            # A scalar argument is treated as a one-element sequence.
            values = [arg, ] if not isinstance(arg, (list, tuple)) else arg
            supported_mimetypes = [v.mimeType for v in input_param.supportedValues]

            for value in values:
                #  if input_param.dataType == "ComplexData": seems simpler
                if isinstance(input_param.defaultValue, ComplexData):

                    # Guess the mimetype of the input value
                    mimetype, encoding = guess_type(value, supported_mimetypes)

                    if encoding is None:
                        encoding = input_param.defaultValue.encoding

                    if isinstance(value, ComplexData):
                        inp = value

                    # Either embed the file content or just the reference.
                    else:
                        if utils.is_embedded_in_request(self._wps.url, value):
                            # If encoding is None, this will return the actual encoding used (utf-8 or base64).
                            value, encoding = embed(value, mimetype, encoding=encoding)
                        else:
                            value = fix_url(str(value))

                        inp = utils.to_owslib(value,
                                              data_type=input_param.dataType,
                                              encoding=encoding,
                                              mimetype=mimetype)

                else:
                    inp = utils.to_owslib(value, data_type=input_param.dataType)

                wps_inputs.append((name, inp))

        return wps_inputs

    def _execute(self, pid, **kwargs):
        """Execute the process."""
        wps_inputs = self._build_inputs(pid, **kwargs)
        wps_outputs = [(o.identifier, "ComplexData" in o.dataType)
                       for o in list(self._outputs[pid].values())]

        # Fall back to SYNC when the server cannot store the execute response.
        mode = self._mode if self._processes[pid].storeSupported else SYNC

        try:
            wps_response = self._wps.execute(
                pid, inputs=wps_inputs, output=wps_outputs, mode=mode)

            if self._interactive and self._processes[pid].statusSupported:
                if self._notebook:
                    notebook.monitor(wps_response, sleep=.2)
                else:
                    self._console_monitor(wps_response)

        except ServiceException as e:
            if "AccessForbidden" in str(e):
                raise UnauthorizedException(
                    "You are not authorized to do a request of type: Execute")
            raise

        # Add the convenience methods of WPSResult to the WPSExecution class. This adds a `get` method.
        utils.extend_instance(wps_response, WPSResult)
        wps_response.attach(wps_outputs=self._outputs[pid],
                            converters=self._converters)
        return wps_response

    def _console_monitor(self, execution, sleep=3):
        """Monitor the execution of a process.

        Parameters
        ----------
        execution : WPSExecution instance
            The execute response to monitor.
        sleep: float
            Number of seconds to wait before each status check.
        """
        import signal

        # Intercept CTRL-C
        def sigint_handler(signum, frame):
            self.cancel()

        signal.signal(signal.SIGINT, sigint_handler)

        while not execution.isComplete():
            execution.checkStatus(sleepSecs=sleep)
            self.logger.info("{} [{}/100] - {} ".format(
                execution.process.identifier,
                execution.percentCompleted,
                execution.statusMessage[:50],
            ))

        if execution.isSucceded():
            self.logger.info("{} done.".format(execution.process.identifier))
        else:
            self.logger.info("{} failed.".format(execution.process.identifier))
identifier = arg elif opt in ("-v", "--verbose"): verbose = True else: assert False, "Unhandled option" # required arguments for all requests if request is None or url is None: usage() sys.exit(3) # instantiate client wps = WebProcessingService(url, verbose=verbose, skip_caps=True) if request == "GetCapabilities": wps.getcapabilities() print("WPS Identification type: %s" % wps.identification.type) print("WPS Identification title: %s" % wps.identification.title) print("WPS Identification abstract: %s" % wps.identification.abstract) for operation in wps.operations: print("WPS Operation: %s" % operation.name) for process in wps.processes: print("WPS Process: identifier=%s title=%s" % (process.identifier, process.title)) elif request == "DescribeProcess": if identifier is None: print('\nERROR: missing mandatory "-i (or --identifier)" argument') usage() sys.exit(4) process = wps.describeprocess(identifier) print("WPS Process: identifier=%s" % process.identifier)
class pyGDPwebProcessing(): """ This class allows interactive calls to be made into the GDP. """ def _init_(self, wfsUrl=WFS_URL, wpsUrl=WPS_URL, version='1.1.0'): self.wfsUrl = wfsUrl self.wpsUrl = wpsUrl self.version = version self.wps = WebProcessingService(wpsUrl) def WPSgetCapbilities(self, xml=None): """ Returns a list of capabilities. """ self.wps.getcapabilities(xml) def WPSdescribeprocess(self, identifier, xml=None): """ Returns a list describing a specific identifier/process. """ self.wps.describeprocess(identifier, xml) def _encodeZipFolder(self, filename): """ This function will encode a zipfile and return the filename. """ #check extension if not filename.endswith('.zip'): print 'Wrong filetype.' return #encode the file with open(filename, 'rb') as fin: bytesRead = fin.read() encode= base64.b64encode(bytesRead) #renames the file and saves it onto local drive filename = filename.split('.') filename = str(filename[0]) + '_copy.' + str(filename[-1]) fout = open(filename, "w") fout.write(encode) fout.close() return filename def uploadShapeFile(self, filePath): """ Given a file, this function encodes the file and uploads it onto geoserver. """ # encodes the file, opens it, reads it, and closes it # returns a filename in form of: filename_copy.zip filePath = self._encodeZipFolder(filePath) if filePath is None: return filehandle = open(filePath, 'r') filedata = filehandle.read() filehandle.close() os.remove(filePath) # deletes the encoded file # this if for naming the file on geoServer filename = filePath.split("/") # gets rid of filepath, keeps only filename eg: file.zip filename = filename[len(filename) - 1] filename = filename.replace("_copy.zip", "") # check to make sure a file with the same name does not exist fileCheckString = "upload:" + filename shapefiles = self.getShapefiles() if fileCheckString in shapefiles: print 'File exists already.' 
return xmlGen = gdpXMLGenerator() root = xmlGen.getUploadXMLtree(filename, upload_URL, filedata) # now we have a complete XML upload request uploadRequest = etree.tostring(root) POST = WebProcessingService(WPS_Service) execution = POST.execute(None, [], request=uploadRequest) monitorExecution(execution) return "upload:"+filename def getTuples(self, shapefile, attribute): """ Will return the dictionary tuples only. """ return self.getValues(shapefile, attribute, getTuples='only') def _getGMLIDString(self, GMLbeginterm, line, GMLstopterm, valBeginTerm, valStopTerm): """ This function is specific to the output documents from the GDP. This function parses the XML document, to find the correct GMLID associated with a feature. Returns the list of values, and a dictionary [feature:id]. """ # we are searching for attr-value, gml:id pair value = [] ntuple = [] begin_index = 0 end_index = len(line) tmpline = line # start at the beginning while begin_index < len(line): begin_index = tmpline.find(GMLbeginterm) if begin_index != -1: end_index = tmpline.find(GMLstopterm, begin_index) # we get the gml term gmlterm = tmpline[begin_index + len(GMLbeginterm) : end_index ] # now we get the attribute value begin_index2 = tmpline.find(valBeginTerm) end_index2 = tmpline.find(valStopTerm, begin_index2) valTerm = tmpline[begin_index2 + len(valBeginTerm) : end_index2 ] #tuple: attr-value, gml:id tmpTuple = valTerm, gmlterm ntuple.append(tmpTuple) tmpline = tmpline[end_index2 :] if valTerm not in value: value.append(valTerm) begin_index = end_index #print begin_index else: break return value, ntuple def _urlen(self, typename): """ Sets up a cgi request to the wfs for features specified. 
""" service_url = GDP_URL qs = [] if service_url.find('?') != -1: qs = cgi.parse_qsl(service_url.split('?')[1]) params = [x[0] for x in qs] if 'service' not in params: qs.append(('service', 'WFS')) if 'request' not in params: qs.append(('request', 'DescribeFeatureType')) if 'version' not in params: qs.append(('version', '1.1.0')) if 'typename' not in params: qs.append(('typename', typename)) urlqs = urlencode(tuple(qs)) return service_url.split('?')[0] + '?' + urlqs def _getStringBetween(self, beginterm, line, stopterm): """ Helper function. Gets the string between beginterm and stopterm. Line is the line or superstring to be examined. returns the string inbetween. """ begin_index = line.find(beginterm) end_index = line.find(stopterm, begin_index) return line[begin_index + len(beginterm) : end_index ] def _getLinesContaining(self, linesToParse, term): """ Given a document, goes through the document and for each line with the occurence of the specified term, add that line to a list. Returns the list. """ line_list = [] for line in linesToParse: if term in line: line_list.append(line) linesToParse.close() return line_list def _getFilterID(self,tuples, value): """ Given a the tuples generated by getTuples and a value, will return a list of gmlIDs associated with the value specified. """ value = str(value) filterID = [] for item in tuples: if item[0] == value: filterID.append(item[1]) return filterID def _parseXMLNodesForTagText(self, xml, tag): """ Parses through a XML tree for text associated with specified tag. Returns a list of the text. """ tag_text = [] for node in xml.iter(): if node.tag == tag: tag_text.append(node.text) return tag_text def _generateRequest(self, dataSetURI, algorithm, method, varID=None, verbose=False): """ Takes a dataset uri, algorithm, method, and datatype. This function will generate a simple XML document to make the request specified. (Only works for ListOpendapGrids and GetGridTimeRange). 
Will return a list containing the info requested for (either data types or time range). """ wps_Service = 'http://cida.usgs.gov/gdp/utility/WebProcessingService' POST = WebProcessingService(wps_Service, verbose=False) xmlGen = gdpXMLGenerator() root = xmlGen.getXMLRequestTree(dataSetURI, algorithm, method, varID, verbose) # change standard output to not display waiting status if not verbose: old_stdout = sys.stdout result = StringIO() sys.stdout = result request = etree.tostring(root) execution = POST.execute(None, [], request=request) if method == 'getDataSetTime': seekterm = 'time' else: seekterm = 'name' if not verbose: sys.stdout = old_stdout return self._parseXMLNodesForTagText(execution.response, seekterm) def _generateFeatureRequest(self, typename, attribute=None): """ This function, given a attribute and a typename or filename will return a list of values associated with the file and the attribute chosen. """ service_url = GDP_URL qs = [] if service_url.find('?') != -1: qs = cgi.parse_qsl(service_url.split('?')[1]) params = [x[0] for x in qs] if 'service' not in params: qs.append(('service', 'WFS')) if 'request' not in params: if attribute is None: qs.append(('request', 'DescribeFeatureType')) else: qs.append(('request', 'GetFeature')) if 'version' not in params: qs.append(('version', '1.1.0')) if 'typename' not in params: qs.append(('typename', typename)) if attribute is not None: if 'propertyname' not in params: qs.append(('propertyname', attribute)) urlqs = urlencode(tuple(qs)) return service_url.split('?')[0] + '?' + urlqs def getAttributes(self, shapefile): """ Given a valid shapefile, this function will create a cgi call returning a list of attributes associated with the shapefile. 
""" # makes a call to get an xml document containing list of shapefiles urlen = self._generateFeatureRequest(shapefile) linesToParse = urlopen(urlen) # gets back from the linesToParse document, all lines with 2nd arg lines = self._getLinesContaining(linesToParse, 'xsd:element maxOccurs=') attributes = [] # search the line for item in lines: word = self._getStringBetween('name=', item, ' ') # for attributes, will return "attribute", qoutes included, strip qoutes if word[1:len(word) - 1] != "the_geom": attributes.append(word[1: len(word) - 1]) return attributes def getShapefiles(self): """ Returns a list of available files currently on geoserver. """ wfs = WebFeatureService(GDP_URL) shapefiles = wfs.contents.keys() return shapefiles def getValues(self, shapefile, attribute, getTuples='false'): """ Similar to get attributes, given a shapefile and a valid attribute this function will make a call to the Web Feature Services returning a list of values associated with the shapefile and attribute. If getTuples = True, will also return the tuples of [feature:id] along with values [feature] """ urlen = self._generateFeatureRequest(shapefile, attribute) inputObject = urlopen(urlen) shapefileterm = shapefile.split(':') strinx = inputObject.read() lines = strinx.split('\n') # gets the tag/namespace name stringSnippet = self._getStringBetween('<', lines[1], ':'+attribute+'>') stringSnippet = stringSnippet.split('<') shapefileterm[0] = stringSnippet[len(stringSnippet) - 1] # look for this pattern: <term[0]:attribute>SOUGHTWORD</term[0]:attribute> values, tuples = self._getGMLIDString('gml:id="', lines[1], '">', '<'+shapefileterm[0] + ':' + attribute + '>', '</' +shapefileterm[0] +':' + attribute + '>') if getTuples=='true': return sorted(values), sorted(tuples) elif getTuples=='only': return sorted(tuples) else: return sorted(values) def getDataType(self, dataSetURI, verbose=False): """ Set up a get Data type request given a dataSetURI. Returns a list of all available data types. 
If verbose = True, will print on screen the waiting seq. for response document. """ algorithm = 'gov.usgs.cida.gdp.wps.algorithm.discovery.ListOpendapGrids' return self._generateRequest(dataSetURI, algorithm, method='getDataType', varID=None, verbose=verbose) def getDataSetURI(self): """ This function will not be implemented. This function is only implemented to give a few dataset URIs which may not work with certain datasets and will with others within the bounding box requirements. """ print 'The dataSetURI outputs a select few URIs and may not work with the specific shapefile you are providing.' print 'To ensure compatibility, we recommend selecting a dataSetURI that is specific to the shapefile.' print 'Or you may utilize the web gdp @ http://cida.usgs.gov/gdp/ to get a dataSet matching your specified shapefile.' print dataSetURIs = ['http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/NCEP/merged/monthly/RegCM3_A2_monthly_merged_NCEP.ncml', 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/conus_grid.w_meta.ncml', 'http://cida.usgs.gov/qa/thredds/dodsC/prism', 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml', 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/alaska_grid.w_meta.ncml', 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/gmo/GMO_w_meta.ncml'] return dataSetURIs def getGMLIDs(self, shapefile, attribute, value): """ This function returns the gmlID associated with a particular attribute value. """ tuples = self.getTuples(shapefile, attribute) return self._getFilterID(tuples, value) def getTimeRange(self, dataSetURI, varID, verbose=False): """ Set up a get dataset time range request given a datatype and dataset uri. Returns the range of the earliest and latest time. If verbose = True, will print on screen the waiting seq. for response document. 
""" algorithm = 'gov.usgs.cida.gdp.wps.algorithm.discovery.GetGridTimeRange' return self._generateRequest(dataSetURI, algorithm, method='getDataSetTime', varID=varID, verbose=verbose) def _getFeatureCollectionGeoType(self, geoType, attribute='the_geom', value=None, gmlIDs=None): """ This function returns a featurecollection. It takes a geotype and determines if the geotype is a shapfile or polygon. """ # This is a polygon if isinstance(geoType, list): return GMLMultiPolygonFeatureCollection( [geoType] ) elif isinstance(geoType, str): if value==None and gmlIDs==None: print 'must input a value and attribute for shapefile' return else: tmpID = [] if gmlIDs is None: if type(value) == type(tmpID): gmlIDs = [] for v in value: tuples = self.getTuples(geoType, attribute) tmpID = self._getFilterID(tuples, v) gmlIDs = gmlIDs + tmpID else: tuples = self.getTuples(geoType, attribute) gmlIDs = self._getFilterID(tuples, value) query = WFSQuery(geoType, propertyNames=["the_geom", attribute], filters=gmlIDs) return WFSFeatureCollection(WFS_URL, query) else: print 'Geotype is not a shapefile or a recognizable polygon.' return None def _executeRequest(self, processid, inputs, verbose): """ This function makes a call to the Web Processing Service with the specified user inputs. """ wps = WebProcessingService(WPS_URL) # if verbose=True, then will we will monitor the status of the call. # if verbose=False, then we will return only the file outputpath. 
if not verbose: # redirects the standard output to avoid printing request status old_stdout = sys.stdout result = StringIO() sys.stdout = result # executes the request execution = wps.execute(processid, inputs, output = "OUTPUT") monitorExecution(execution, download=True) # sets the standard output back to original sys.stdout = old_stdout result_string = result.getvalue() #parses the redirected output to get the filepath of the saved file output = result_string.split('\n') tmp = output[len(output) - 2].split(' ') return tmp[len(tmp)-1] # executes the request execution = wps.execute(processid, inputs, output = "OUTPUT") monitorExecution(execution, download=True) def submitFeatureWeightedGridStatistics(self, geoType, dataSetURI, varID, startTime, endTime, attribute='the_geom', value=None, gmlIDs=None, verbose=None, coverage='true', delim='COMMA', stat='MEAN', grpby='STATISTIC', timeStep='false', summAttr='false'): """ Makes a featureWeightedGridStatistics algorithm call. """ featureCollection = self._getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs) if featureCollection is None: return processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm' inputs = [("FEATURE_ATTRIBUTE_NAME",attribute), ("DATASET_URI", dataSetURI), ("DATASET_ID", varID), ("TIME_START",startTime), ("TIME_END",endTime), ("REQUIRE_FULL_COVERAGE",coverage), ("DELIMITER",delim), ("STATISTICS",stat), ("GROUP_BY", grpby), ("SUMMARIZE_TIMESTEP", timeStep), ("SUMMARIZE_FEATURE_ATTRIBUTE",summAttr), ("FEATURE_COLLECTION", featureCollection)] return self._executeRequest(processid, inputs, verbose) def submitFeatureCoverageOPenDAP(self, geoType, dataSetURI, varID, startTime, endTime, attribute='the_geom', value=None, gmlIDs=None, verbose=False, coverage='true'): """ Makes a featureCoverageOPenDAP algorithm call. 
""" featureCollection = self._getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs) if featureCollection is None: return processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureCoverageOPeNDAPIntersectionAlgorithm' inputs = [ ("DATASET_URI", dataSetURI), ("DATASET_ID", varID), ("TIME_START",startTime), ("TIME_END",endTime), ("REQUIRE_FULL_COVERAGE",coverage), ("FEATURE_COLLECTION", featureCollection)] return self._executeRequest(processid, inputs, verbose) def submitFeatureCoverageWCSIntersection(self, geoType, dataSetURI, varID, attribute='the_geom', value=None, gmlIDs=None, verbose=False, coverage='true'): """ Makes a featureCoverageWCSIntersection algorithm call. """ featureCollection = self._getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs) if featureCollection is None: return processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureCoverageIntersectionAlgorithm' inputs = [("DATASET_URI", dataSetURI), ("DATASET_ID", varID), ("REQUIRE_FULL_COVERAGE",coverage), ("FEATURE_COLLECTION", featureCollection)] return self._executeRequest(processid, inputs, verbose) def submitFeatureCategoricalGridCoverage(self, geoType, dataSetURI, varID, attribute='the_geom', value=None, gmlIDs=None, verbose=False, coverage='true', delim='COMMA'): """ Makes a featureCategoricalGridCoverage algorithm call. """ featureCollection = self._getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs) if featureCollection is None: return processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureCategoricalGridCoverageAlgorithm' inputs = [ ("FEATURE_ATTRIBUTE_NAME",attribute), ("DATASET_URI", dataSetURI), ("DATASET_ID", varID), ("DELIMITER", delim), ("REQUIRE_FULL_COVERAGE",coverage), ("FEATURE_COLLECTION", featureCollection)] return self._executeRequest(processid, inputs, verbose)
class pyGDPwebProcessing(): """ This class allows interactive calls to be made into the GDP. """ def _init_(self, wfsUrl=WFS_URL, wpsUrl=WPS_URL, version='1.1.0'): self.wfsUrl = wfsUrl self.wpsUrl = wpsUrl self.version = version self.wps = WebProcessingService(wpsUrl) def WPSgetCapbilities(self, xml=None): """ Returns a list of capabilities. """ self.wps.getcapabilities(xml) def WPSdescribeprocess(self, identifier, xml=None): """ Returns a list describing a specific identifier/process. """ self.wps.describeprocess(identifier, xml) def _encodeZipFolder(self, filename): """ This function will encode a zipfile and return the filename. """ #check extension if not filename.endswith('.zip'): print 'Wrong filetype.' return #encode the file with open(filename, 'rb') as fin: bytesRead = fin.read() encode = base64.b64encode(bytesRead) #renames the file and saves it onto local drive filename = filename.split('.') filename = str(filename[0]) + '_copy.' + str(filename[-1]) fout = open(filename, "w") fout.write(encode) fout.close() return filename def uploadShapeFile(self, filePath): """ Given a file, this function encodes the file and uploads it onto geoserver. """ # encodes the file, opens it, reads it, and closes it # returns a filename in form of: filename_copy.zip filePath = self._encodeZipFolder(filePath) if filePath is None: return filehandle = open(filePath, 'r') filedata = filehandle.read() filehandle.close() os.remove(filePath) # deletes the encoded file # this if for naming the file on geoServer filename = filePath.split("/") # gets rid of filepath, keeps only filename eg: file.zip filename = filename[len(filename) - 1] filename = filename.replace("_copy.zip", "") # check to make sure a file with the same name does not exist fileCheckString = "upload:" + filename shapefiles = self.getShapefiles() if fileCheckString in shapefiles: print 'File exists already.' 
return xmlGen = gdpXMLGenerator() root = xmlGen.getUploadXMLtree(filename, upload_URL, filedata) # now we have a complete XML upload request uploadRequest = etree.tostring(root) POST = WebProcessingService(WPS_Service) execution = POST.execute(None, [], request=uploadRequest) monitorExecution(execution) return "upload:" + filename def getTuples(self, shapefile, attribute): """ Will return the dictionary tuples only. """ return self.getValues(shapefile, attribute, getTuples='only') def _getGMLIDString(self, GMLbeginterm, line, GMLstopterm, valBeginTerm, valStopTerm): """ This function is specific to the output documents from the GDP. This function parses the XML document, to find the correct GMLID associated with a feature. Returns the list of values, and a dictionary [feature:id]. """ # we are searching for attr-value, gml:id pair value = [] ntuple = [] begin_index = 0 end_index = len(line) tmpline = line # start at the beginning while begin_index < len(line): begin_index = tmpline.find(GMLbeginterm) if begin_index != -1: end_index = tmpline.find(GMLstopterm, begin_index) # we get the gml term gmlterm = tmpline[begin_index + len(GMLbeginterm):end_index] # now we get the attribute value begin_index2 = tmpline.find(valBeginTerm) end_index2 = tmpline.find(valStopTerm, begin_index2) valTerm = tmpline[begin_index2 + len(valBeginTerm):end_index2] #tuple: attr-value, gml:id tmpTuple = valTerm, gmlterm ntuple.append(tmpTuple) tmpline = tmpline[end_index2:] if valTerm not in value: value.append(valTerm) begin_index = end_index #print begin_index else: break return value, ntuple def _urlen(self, typename): """ Sets up a cgi request to the wfs for features specified. 
""" service_url = GDP_URL qs = [] if service_url.find('?') != -1: qs = cgi.parse_qsl(service_url.split('?')[1]) params = [x[0] for x in qs] if 'service' not in params: qs.append(('service', 'WFS')) if 'request' not in params: qs.append(('request', 'DescribeFeatureType')) if 'version' not in params: qs.append(('version', '1.1.0')) if 'typename' not in params: qs.append(('typename', typename)) urlqs = urlencode(tuple(qs)) return service_url.split('?')[0] + '?' + urlqs def _getStringBetween(self, beginterm, line, stopterm): """ Helper function. Gets the string between beginterm and stopterm. Line is the line or superstring to be examined. returns the string inbetween. """ begin_index = line.find(beginterm) end_index = line.find(stopterm, begin_index) return line[begin_index + len(beginterm):end_index] def _getLinesContaining(self, linesToParse, term): """ Given a document, goes through the document and for each line with the occurence of the specified term, add that line to a list. Returns the list. """ line_list = [] for line in linesToParse: if term in line: line_list.append(line) linesToParse.close() return line_list def _getFilterID(self, tuples, value): """ Given a the tuples generated by getTuples and a value, will return a list of gmlIDs associated with the value specified. """ value = str(value) filterID = [] for item in tuples: if item[0] == value: filterID.append(item[1]) return filterID def _parseXMLNodesForTagText(self, xml, tag): """ Parses through a XML tree for text associated with specified tag. Returns a list of the text. """ tag_text = [] for node in xml.iter(): if node.tag == tag: tag_text.append(node.text) return tag_text def _generateRequest(self, dataSetURI, algorithm, method, varID=None, verbose=False): """ Takes a dataset uri, algorithm, method, and datatype. This function will generate a simple XML document to make the request specified. (Only works for ListOpendapGrids and GetGridTimeRange). 
Will return a list containing the info requested for (either data types or time range). """ wps_Service = 'http://cida.usgs.gov/gdp/utility/WebProcessingService' POST = WebProcessingService(wps_Service, verbose=False) xmlGen = gdpXMLGenerator() root = xmlGen.getXMLRequestTree(dataSetURI, algorithm, method, varID, verbose) # change standard output to not display waiting status if not verbose: old_stdout = sys.stdout result = StringIO() sys.stdout = result request = etree.tostring(root) execution = POST.execute(None, [], request=request) if method == 'getDataSetTime': seekterm = 'time' else: seekterm = 'name' if not verbose: sys.stdout = old_stdout return self._parseXMLNodesForTagText(execution.response, seekterm) def _generateFeatureRequest(self, typename, attribute=None): """ This function, given a attribute and a typename or filename will return a list of values associated with the file and the attribute chosen. """ service_url = GDP_URL qs = [] if service_url.find('?') != -1: qs = cgi.parse_qsl(service_url.split('?')[1]) params = [x[0] for x in qs] if 'service' not in params: qs.append(('service', 'WFS')) if 'request' not in params: if attribute is None: qs.append(('request', 'DescribeFeatureType')) else: qs.append(('request', 'GetFeature')) if 'version' not in params: qs.append(('version', '1.1.0')) if 'typename' not in params: qs.append(('typename', typename)) if attribute is not None: if 'propertyname' not in params: qs.append(('propertyname', attribute)) urlqs = urlencode(tuple(qs)) return service_url.split('?')[0] + '?' + urlqs def getAttributes(self, shapefile): """ Given a valid shapefile, this function will create a cgi call returning a list of attributes associated with the shapefile. 
""" # makes a call to get an xml document containing list of shapefiles urlen = self._generateFeatureRequest(shapefile) linesToParse = urlopen(urlen) # gets back from the linesToParse document, all lines with 2nd arg lines = self._getLinesContaining(linesToParse, 'xsd:element maxOccurs=') attributes = [] # search the line for item in lines: word = self._getStringBetween('name=', item, ' ') # for attributes, will return "attribute", qoutes included, strip qoutes if word[1:len(word) - 1] != "the_geom": attributes.append(word[1:len(word) - 1]) return attributes def getShapefiles(self): """ Returns a list of available files currently on geoserver. """ wfs = WebFeatureService(GDP_URL) shapefiles = wfs.contents.keys() return shapefiles def getValues(self, shapefile, attribute, getTuples='false'): """ Similar to get attributes, given a shapefile and a valid attribute this function will make a call to the Web Feature Services returning a list of values associated with the shapefile and attribute. If getTuples = True, will also return the tuples of [feature:id] along with values [feature] """ urlen = self._generateFeatureRequest(shapefile, attribute) inputObject = urlopen(urlen) shapefileterm = shapefile.split(':') strinx = inputObject.read() lines = strinx.split('\n') # gets the tag/namespace name stringSnippet = self._getStringBetween('<', lines[1], ':' + attribute + '>') stringSnippet = stringSnippet.split('<') shapefileterm[0] = stringSnippet[len(stringSnippet) - 1] # look for this pattern: <term[0]:attribute>SOUGHTWORD</term[0]:attribute> values, tuples = self._getGMLIDString( 'gml:id="', lines[1], '">', '<' + shapefileterm[0] + ':' + attribute + '>', '</' + shapefileterm[0] + ':' + attribute + '>') if getTuples == 'true': return sorted(values), sorted(tuples) elif getTuples == 'only': return sorted(tuples) else: return sorted(values) def getDataType(self, dataSetURI, verbose=False): """ Set up a get Data type request given a dataSetURI. 
Returns a list of all available data types. If verbose = True, will print on screen the waiting seq. for response document. """ algorithm = 'gov.usgs.cida.gdp.wps.algorithm.discovery.ListOpendapGrids' return self._generateRequest(dataSetURI, algorithm, method='getDataType', varID=None, verbose=verbose) def getDataSetURI(self): """ This function will not be implemented. This function is only implemented to give a few dataset URIs which may not work with certain datasets and will with others within the bounding box requirements. """ print 'The dataSetURI outputs a select few URIs and may not work with the specific shapefile you are providing.' print 'To ensure compatibility, we recommend selecting a dataSetURI that is specific to the shapefile.' print 'Or you may utilize the web gdp @ http://cida.usgs.gov/gdp/ to get a dataSet matching your specified shapefile.' print dataSetURIs = [ 'http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/NCEP/merged/monthly/RegCM3_A2_monthly_merged_NCEP.ncml', 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/conus_grid.w_meta.ncml', 'http://cida.usgs.gov/qa/thredds/dodsC/prism', 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml', 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/alaska_grid.w_meta.ncml', 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/gmo/GMO_w_meta.ncml' ] return dataSetURIs def getGMLIDs(self, shapefile, attribute, value): """ This function returns the gmlID associated with a particular attribute value. """ tuples = self.getTuples(shapefile, attribute) return self._getFilterID(tuples, value) def getTimeRange(self, dataSetURI, varID, verbose=False): """ Set up a get dataset time range request given a datatype and dataset uri. Returns the range of the earliest and latest time. If verbose = True, will print on screen the waiting seq. for response document. 
""" algorithm = 'gov.usgs.cida.gdp.wps.algorithm.discovery.GetGridTimeRange' return self._generateRequest(dataSetURI, algorithm, method='getDataSetTime', varID=varID, verbose=verbose) def _getFeatureCollectionGeoType(self, geoType, attribute='the_geom', value=None, gmlIDs=None): """ This function returns a featurecollection. It takes a geotype and determines if the geotype is a shapfile or polygon. """ # This is a polygon if isinstance(geoType, list): return GMLMultiPolygonFeatureCollection([geoType]) elif isinstance(geoType, str): if value == None and gmlIDs == None: print 'must input a value and attribute for shapefile' return else: tmpID = [] if gmlIDs is None: if type(value) == type(tmpID): gmlIDs = [] for v in value: tuples = self.getTuples(geoType, attribute) tmpID = self._getFilterID(tuples, v) gmlIDs = gmlIDs + tmpID else: tuples = self.getTuples(geoType, attribute) gmlIDs = self._getFilterID(tuples, value) query = WFSQuery(geoType, propertyNames=["the_geom", attribute], filters=gmlIDs) return WFSFeatureCollection(WFS_URL, query) else: print 'Geotype is not a shapefile or a recognizable polygon.' return None def _executeRequest(self, processid, inputs, verbose): """ This function makes a call to the Web Processing Service with the specified user inputs. """ wps = WebProcessingService(WPS_URL) # if verbose=True, then will we will monitor the status of the call. # if verbose=False, then we will return only the file outputpath. 
if not verbose: # redirects the standard output to avoid printing request status old_stdout = sys.stdout result = StringIO() sys.stdout = result # executes the request execution = wps.execute(processid, inputs, output="OUTPUT") monitorExecution(execution, download=True) # sets the standard output back to original sys.stdout = old_stdout result_string = result.getvalue() #parses the redirected output to get the filepath of the saved file output = result_string.split('\n') tmp = output[len(output) - 2].split(' ') return tmp[len(tmp) - 1] # executes the request execution = wps.execute(processid, inputs, output="OUTPUT") monitorExecution(execution, download=True) def submitFeatureWeightedGridStatistics(self, geoType, dataSetURI, varID, startTime, endTime, attribute='the_geom', value=None, gmlIDs=None, verbose=None, coverage='true', delim='COMMA', stat='MEAN', grpby='STATISTIC', timeStep='false', summAttr='false'): """ Makes a featureWeightedGridStatistics algorithm call. """ featureCollection = self._getFeatureCollectionGeoType( geoType, attribute, value, gmlIDs) if featureCollection is None: return processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm' inputs = [("FEATURE_ATTRIBUTE_NAME", attribute), ("DATASET_URI", dataSetURI), ("DATASET_ID", varID), ("TIME_START", startTime), ("TIME_END", endTime), ("REQUIRE_FULL_COVERAGE", coverage), ("DELIMITER", delim), ("STATISTICS", stat), ("GROUP_BY", grpby), ("SUMMARIZE_TIMESTEP", timeStep), ("SUMMARIZE_FEATURE_ATTRIBUTE", summAttr), ("FEATURE_COLLECTION", featureCollection)] return self._executeRequest(processid, inputs, verbose) def submitFeatureCoverageOPenDAP(self, geoType, dataSetURI, varID, startTime, endTime, attribute='the_geom', value=None, gmlIDs=None, verbose=False, coverage='true'): """ Makes a featureCoverageOPenDAP algorithm call. 
""" featureCollection = self._getFeatureCollectionGeoType( geoType, attribute, value, gmlIDs) if featureCollection is None: return processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureCoverageOPeNDAPIntersectionAlgorithm' inputs = [("DATASET_URI", dataSetURI), ("DATASET_ID", varID), ("TIME_START", startTime), ("TIME_END", endTime), ("REQUIRE_FULL_COVERAGE", coverage), ("FEATURE_COLLECTION", featureCollection)] return self._executeRequest(processid, inputs, verbose) def submitFeatureCoverageWCSIntersection(self, geoType, dataSetURI, varID, attribute='the_geom', value=None, gmlIDs=None, verbose=False, coverage='true'): """ Makes a featureCoverageWCSIntersection algorithm call. """ featureCollection = self._getFeatureCollectionGeoType( geoType, attribute, value, gmlIDs) if featureCollection is None: return processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureCoverageIntersectionAlgorithm' inputs = [("DATASET_URI", dataSetURI), ("DATASET_ID", varID), ("REQUIRE_FULL_COVERAGE", coverage), ("FEATURE_COLLECTION", featureCollection)] return self._executeRequest(processid, inputs, verbose) def submitFeatureCategoricalGridCoverage(self, geoType, dataSetURI, varID, attribute='the_geom', value=None, gmlIDs=None, verbose=False, coverage='true', delim='COMMA'): """ Makes a featureCategoricalGridCoverage algorithm call. """ featureCollection = self._getFeatureCollectionGeoType( geoType, attribute, value, gmlIDs) if featureCollection is None: return processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureCategoricalGridCoverageAlgorithm' inputs = [("FEATURE_ATTRIBUTE_NAME", attribute), ("DATASET_URI", dataSetURI), ("DATASET_ID", varID), ("DELIMITER", delim), ("REQUIRE_FULL_COVERAGE", coverage), ("FEATURE_COLLECTION", featureCollection)] return self._executeRequest(processid, inputs, verbose)
def test_wps_getcapabilities_ceda():
    """Parse a cached CEDA GetCapabilities document and verify the service
    metadata, operations and process list it advertises (no live request)."""
    # Initialize WPS client without contacting the server
    wps = WebProcessingService('http://ceda-wps2.badc.rl.ac.uk/wps', skip_caps=True)
    # Execute fake invocation of GetCapabilities operation by parsing cached
    # response from the CEDA service
    xml = open(resource_file('wps_CEDACapabilities.xml'), 'rb').read()
    wps.getcapabilities(xml=xml)
    # Check WPS description
    assert wps.identification.type == 'WPS'
    assert wps.identification.title == 'WPS Pylons Test Server'
    assert wps.identification.abstract is None
    # Check available operations
    operations = [op.name for op in wps.operations]
    assert operations == ['GetCapabilities', 'DescribeProcess', 'Execute']
    # Check high level process descriptions (identifier, title) pairs, in the
    # order they appear in the cached capabilities document
    processes = [(p.identifier, p.title) for p in wps.processes]
    assert processes == [
        ('CDMSSubsetVariable', 'Writes a text file and returns an output.'),
        ('NCDumpIt', 'Calls ncdump on the input file path and writes it to an output file.'),
        ('TestDap', 'Writes a text file and returns an output.'),
        ('CDMSDescribeVariableDomain', 'Writes a text file and returns an output.'),
        ('CFCheck', 'Writes a text file and returns an output.'),
        ('DoubleIt', 'Doubles the input number and returns value'),
        ('SimplePlot', 'Creates a simple map plot.'),
        ('CDMSListDatasets', 'Writes a text file and returns an output.'),
        ('CDMSListVariables', 'Writes a text file and returns an output.'),
        ('WCSWrapper', 'Web Coverage Service Wrapper Process'),
        ('GetWeatherStations', 'Writes a text file with one weather station per line'),
        ('ListPPFileHeader', 'Writes a text file that contains a listing of pp-records in a file.'),
        ('TakeAges', 'A test process to last a long time.'),
        ('CMIP5FileFinder', 'Writes a test file of matched CMIP5 files.'),
        ('SubsetPPFile', 'Filters a PP-file to generate a new subset PP-file.'),
        ('ExtractUKStationData', 'ExtractUKStationData'),
        ('CDOWrapper1', 'Writes a text file and returns an output.'),
        ('MMDNCDiff', 'MMDNCDiff'),
        ('PlotRotatedGrid', 'Creates a plot - to show we can plot a rotated grid.'),
        ('MMDAsync', 'Writes a text file and returns an output.'),
        ('MashMyDataMultiplier', 'Writes a text file and returns an output.'),
        ('Delegator', 'Writes a text file and returns an output.'),
        ('ExArchProc1', 'Writes a text file and returns an output.'),
        ('CDOShowInfo', 'Writes a text file and returns an output.'),
        ('PostTest', 'Writes a text file and returns an output.'),
        ('StatusTestProcess', 'An process to test status responses'),
        ('WaitForFileDeletionCached', 'An asynchronous job that waits for a file to be deleted'),
        ('WaitForAllFilesToBeDeleted', 'An asynchronous job that waits for a number of files to be deleted'),
        ('AsyncTest', 'Does an asynchronous test job run'),
        ('SyncTest1', 'Just creates a file.'),
        ('WaitForFileDeletion', 'An asynchronous job that waits for a file to be deleted'),
        ('ProcessTemplate', 'Writes a text file and returns an output.')
    ]
class WPSClient(object):
    """Returns a class where every public method is a WPS process available at
    the given url.

    Example:
        >>> emu = WPSClient(url='<server url>')
        >>> emu.hello('stranger')
        'Hello stranger'
    """

    def __init__(
        self,
        url,
        processes=None,
        converters=None,
        username=None,
        password=None,
        headers=None,
        verify=True,
        cert=None,
        verbose=False,
        progress=False,
        version=WPS_DEFAULT_VERSION,
    ):
        """
        Args:
            url (str): Link to WPS provider.
            config (Config): an instance
            processes: Specify a subset of processes to bind. Defaults to all
              processes.
            converters (dict): Correspondence of {mimetype: class} to convert
              this mimetype to a python object.
            username (str): passed to :class:`owslib.wps.WebProcessingService`
            password (str): passed to :class:`owslib.wps.WebProcessingService`
            headers (str): passed to :class:`owslib.wps.WebProcessingService`
            verify (bool): passed to :class:`owslib.wps.WebProcessingService`
            cert (str): passed to :class:`owslib.wps.WebProcessingService`
            verbose (str): passed to :class:`owslib.wps.WebProcessingService`
            progress (bool): If True, enable interactive user mode.
            version (str): WPS version to use.
        """
        self._converters = converters
        self._interactive = progress
        # ASYNC only makes sense in interactive/progress mode.
        self._mode = ASYNC if progress else SYNC
        self._notebook = notebook.is_notebook()
        self._inputs = {}
        self._outputs = {}

        if not verify:
            # user explicitly opted out of TLS verification: silence the
            # per-request warnings urllib3 would otherwise emit
            import urllib3
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

        self._wps = WebProcessingService(
            url,
            version=version,
            username=username,
            password=password,
            verbose=verbose,
            headers=headers,
            verify=verify,
            cert=cert,
            skip_caps=True,
        )

        try:
            self._wps.getcapabilities()
        except ServiceException as e:
            # Translate the generic OWS error into a clearer exception when
            # the server refused the request for authorization reasons.
            if "AccessForbidden" in str(e):
                raise UnauthorizedException(
                    "You are not authorized to do a request of type: GetCapabilities"
                )
            raise

        self._processes = self._get_process_description(processes)

        # Build the methods: one bound method per WPS process identifier.
        for pid in self._processes:
            setattr(self, sanitize(pid), types.MethodType(self._method_factory(pid), self))

        self.logger = logging.getLogger('WPSClient')
        if progress:
            self._setup_logging()

        # Class docstring reflects the processes actually bound on this instance.
        self.__doc__ = utils.build_wps_client_doc(self._wps, self._processes)

    def _get_process_description(self, processes):
        """Return the description for each process.

        Sends the server a `describeProcess` request for each process.

        Parameters
        ----------
        processes: str, list, None
          A process name, a list of process names or None (for all processes).

        Returns
        -------
        OrderedDict
          A dictionary keyed by the process identifier of process descriptions.
        """
        all_wps_processes = [p.identifier for p in self._wps.processes]

        if processes is None:
            # NOTE(review): lexicographic string comparison of version numbers —
            # misorders e.g. '0.9.0' vs '0.17.0'; consider a tuple-of-ints compare.
            if owslib.__version__ > '0.17.0':
                # Get the description for all processes in one request.
                ps = self._wps.describeprocess('all')
                return OrderedDict((p.identifier, p) for p in ps)
            else:
                processes = all_wps_processes

        # Check for invalid process names, i.e. not matching the getCapabilities response.
        process_names, missing = utils.filter_case_insensitive(
            processes, all_wps_processes)

        if missing:
            message = "These process names were not found on the WPS server: {}"
            raise ValueError(message.format(", ".join(missing)))

        # Get the description for each process.
        ps = [self._wps.describeprocess(pid) for pid in process_names]

        return OrderedDict((p.identifier, p) for p in ps)

    def _setup_logging(self):
        # Mirror progress messages to stdout for interactive sessions.
        self.logger.setLevel(logging.INFO)
        import sys
        fh = logging.StreamHandler(sys.stdout)
        fh.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
        self.logger.addHandler(fh)

    def _method_factory(self, pid):
        """Create a custom function signature with docstring, instantiate it and
        pass it to a wrapper which will actually call the process.

        Parameters
        ----------
        pid: str
          Identifier of the WPS process.

        Returns
        -------
        func
          A Python function calling the remote process, complete with docstring
          and signature.
        """
        process = self._processes[pid]

        input_defaults = OrderedDict()
        for inpt in process.dataInputs:
            iid = sanitize(inpt.identifier)
            # ComplexData defaults are handled at execution time, not here.
            default = getattr(inpt, "defaultValue", None) if inpt.dataType != 'ComplexData' else None
            input_defaults[iid] = utils.from_owslib(default, inpt.dataType)

        # Generated body forwards all keyword arguments to _execute.
        body = dedent("""
            inputs = locals()
            inputs.pop('self')
            return self._execute('{pid}', **inputs)
        """).format(pid=pid)

        func_builder = FunctionBuilder(
            name=sanitize(pid),
            doc=utils.build_process_doc(process),
            args=["self"] + list(input_defaults),
            defaults=tuple(input_defaults.values()),
            body=body,
            filename=__file__,
            module=self.__module__,
        )

        self._inputs[pid] = {}
        if hasattr(process, "dataInputs"):
            self._inputs[pid] = OrderedDict(
                (i.identifier, i) for i in process.dataInputs
            )

        self._outputs[pid] = {}
        if hasattr(process, "processOutputs"):
            self._outputs[pid] = OrderedDict(
                (o.identifier, o) for o in process.processOutputs
            )

        func = func_builder.get_func()

        return func

    def _execute(self, pid, **kwargs):
        """Execute the process."""
        wps_inputs = []
        for name, input_param in self._inputs[pid].items():
            value = kwargs.get(sanitize(name))
            if value is not None:
                if isinstance(input_param.defaultValue, ComplexData):
                    encoding = input_param.defaultValue.encoding
                    mimetype = input_param.defaultValue.mimeType

                    if isinstance(value, ComplexData):
                        inp = value

                    else:
                        if utils.is_embedded_in_request(self._wps.url, value):
                            # If encoding is None, this will return the actual
                            # encoding used (utf-8 or base64).
                            value, encoding = embed(value, mimetype, encoding=encoding)
                        else:
                            value = fix_url(value)

                        inp = utils.to_owslib(value,
                                              data_type=input_param.dataType,
                                              encoding=encoding,
                                              mimetype=mimetype)

                else:
                    inp = utils.to_owslib(value, data_type=input_param.dataType)

                wps_inputs.append((name, inp))

        # Request outputs by reference when they are ComplexData.
        wps_outputs = [
            (o.identifier, "ComplexData" in o.dataType)
            for o in self._outputs[pid].values()
        ]

        # Fall back to synchronous mode if the server cannot store results.
        mode = self._mode if self._processes[pid].storeSupported else SYNC

        try:
            wps_response = self._wps.execute(
                pid, inputs=wps_inputs, output=wps_outputs, mode=mode
            )

            if self._interactive and self._processes[pid].statusSupported:
                if self._notebook:
                    notebook.monitor(wps_response, sleep=.2)
                else:
                    self._console_monitor(wps_response)

        except ServiceException as e:
            if "AccessForbidden" in str(e):
                raise UnauthorizedException(
                    "You are not authorized to do a request of type: Execute"
                )
            raise

        # Add the convenience methods of WPSResult to the WPSExecution class.
        # This adds a `get` method.
        utils.extend_instance(wps_response, WPSResult)
        wps_response.attach(wps_outputs=self._outputs[pid], converters=self._converters)
        return wps_response

    def _console_monitor(self, execution, sleep=3):
        """Monitor the execution of a process.

        Parameters
        ----------
        execution : WPSExecution instance
          The execute response to monitor.
        sleep: float
          Number of seconds to wait before each status check.
        """
        import signal

        # Intercept CTRL-C
        def sigint_handler(signum, frame):
            self.cancel()
        signal.signal(signal.SIGINT, sigint_handler)

        while not execution.isComplete():
            execution.checkStatus(sleepSecs=sleep)
            self.logger.info("{} [{}/100] - {} ".format(
                execution.process.identifier,
                execution.percentCompleted,
                execution.statusMessage[:50],))

        if execution.isSucceded():
            self.logger.info("{} done.".format(execution.process.identifier))
        else:
            self.logger.info("{} failed.".format(execution.process.identifier))
class pyGDPwebProcessing():
    """
    This class allows interactive calls to be made into the GDP.

    Thin facade over the pygdp helper modules: every public method forwards
    its arguments (plus the configured WFS url) to the matching helper.
    """

    def __init__(self, WFS_URL=None):
        # Fall back to the package-level WFS_URL when none is supplied.
        if WFS_URL is None:
            from pygdp.namespaces import WFS_URL
        wfsUrl = WFS_URL
        self.wfsUrl = wfsUrl
        self.wpsUrl = WPS_URL
        self.version = '1.1.0'
        self.wps = WebProcessingService(WPS_URL)

    def WPSgetCapbilities(self, xml=None):
        """
        Returns a list of capabilities.
        """
        self.wps.getcapabilities(xml)

    def WPSdescribeprocess(self, identifier, xml=None):
        """
        Returns a list describing a specific identifier/process.
        """
        self.wps.describeprocess(identifier, xml)

    #pyGDP Submit Feature
    def dodsReplace(self, dataSetURI, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return _execute_request.dodsReplace(dataSetURI, verbose)

    def submitFeatureCoverageOPenDAP(self, geoType, dataSetURI, varID,
                                     startTime, endTime, attribute='the_geom',
                                     value=None, gmlIDs=None, verbose=False,
                                     coverage='true', outputfname=None,
                                     sleepSecs=10):
        if verbose:
            ch.setLevel(logging.INFO)
        return feature_coverage.submitFeatureCoverageOPenDAP(
            geoType, dataSetURI, varID, startTime, endTime, attribute, value,
            gmlIDs, verbose, coverage, self.wfsUrl, outputfname, sleepSecs)

    def submitFeatureCoverageWCSIntersection(self, geoType, dataSetURI, varID,
                                             attribute='the_geom', value=None,
                                             gmlIDs=None, verbose=False,
                                             coverage='true', outputfname=None,
                                             sleepSecs=10):
        if verbose:
            ch.setLevel(logging.INFO)
        return feature_coverage.submitFeatureCoverageWCSIntersection(
            geoType, dataSetURI, varID, attribute, value, gmlIDs, verbose,
            coverage, self.wfsUrl, outputfname, sleepSecs)

    def submitFeatureCategoricalGridCoverage(self, geoType, dataSetURI, varID,
                                             attribute='the_geom', value=None,
                                             gmlIDs=None, verbose=False,
                                             coverage='true', delim='COMMA',
                                             outputfname=None, sleepSecs=10):
        if verbose:
            ch.setLevel(logging.INFO)
        return feature_coverage.submitFeatureCategoricalGridCoverage(
            geoType, dataSetURI, varID, attribute, value, gmlIDs, verbose,
            coverage, delim, self.wfsUrl, outputfname, sleepSecs)

    def submitFeatureWeightedGridStatistics(self, geoType, dataSetURI, varID,
                                            startTime, endTime,
                                            attribute='the_geom', value=None,
                                            gmlIDs=None, verbose=None,
                                            coverage=True, delim='COMMA',
                                            stat='MEAN', grpby='STATISTIC',
                                            timeStep=False, summAttr=False,
                                            weighted=True, outputfname=None,
                                            sleepSecs=10):
        if verbose:
            ch.setLevel(logging.INFO)
        return fwgs.submitFeatureWeightedGridStatistics(
            geoType, dataSetURI, varID, startTime, endTime, attribute, value,
            gmlIDs, verbose, coverage, delim, stat, grpby, timeStep, summAttr,
            weighted, self.wfsUrl, outputfname, sleepSecs)

    #pyGDP File Utilities
    def shapeToZip(self, inShape, outZip=None, allFiles=True):
        # BUG FIX: previously hard-coded outZip=None, allFiles=True, silently
        # discarding the caller's arguments; forward them instead.
        return shape_to_zip.shapeToZip(inShape, outZip=outZip, allFiles=allFiles)

    def uploadShapeFile(self, filePath):
        value = upload_shapefile.uploadShapeFile(filePath)
        return value

    #pyGDP WFS Utilities
    def getTuples(self, shapefile, attribute):
        return shapefile_id_handle.getTuples(shapefile, attribute)

    def getShapefiles(self):
        return shapefile_value_handle.getShapefiles(self.wfsUrl)

    def getAttributes(self, shapefile):
        return shapefile_value_handle.getAttributes(shapefile, self.wfsUrl)

    def getValues(self, shapefile, attribute, getTuples='false', limitFeatures=None):
        return shapefile_value_handle.getValues(shapefile, attribute, getTuples,
                                                limitFeatures, self.wfsUrl)

    def getGMLIDs(self, shapefile, attribute, value):
        return shapefile_id_handle.getGMLIDs(shapefile, attribute, value,
                                             WFS_URL=self.wfsUrl)

    def _getFilterID(self, tuples, value):
        return shapefile_id_handle._getFilterID(tuples, value)

    def _getFeatureCollectionGeoType(self, geoType, attribute='the_geom',
                                     value=None, gmlIDs=None):
        return _get_geotype._getFeatureCollectionGeoType(
            geoType, attribute, value, gmlIDs, self.wfsUrl)

    def _generateRequest(self, dataSetURI, algorithm, method, varID=None,
                         verbose=False):
        return _webdata_xml_generate._generateRequest(dataSetURI, algorithm,
                                                      method, varID, verbose)

    #pyGDP WebData Utilities
    def getDataLongName(self, dataSetURI, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return webdata_handle.getDataLongName(dataSetURI, verbose)

    def getDataType(self, dataSetURI, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return webdata_handle.getDataType(dataSetURI, verbose)

    def getDataUnits(self, dataSetURI, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return webdata_handle.getDataUnits(dataSetURI, verbose)

    def getDataSetURI(self, anyText=None, CSWURL=CSWURL, BBox=None):
        """
        Searches a given CSW server and returns metadata content for the
        datasets found.

        :param anyText: keywords to be passed to CSW get records
        :type anyText: list or None
        """
        return webdata_handle.getDataSetURI(anyText, CSWURL, BBox)

    def getTimeRange(self, dataSetURI, varID, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return webdata_handle.getTimeRange(dataSetURI, varID, verbose)

    #Pull up docstrings.
    #dodsReplace.__doc__ = _execute_request.dodsReplace.__doc__
    getAttributes.__doc__ = shapefile_value_handle.getAttributes.__doc__
    getDataLongName.__doc__ = webdata_handle.getDataLongName.__doc__
    getDataSetURI.__doc__ = webdata_handle.getDataSetURI.__doc__
    getDataType.__doc__ = webdata_handle.getDataType.__doc__
    getDataUnits.__doc__ = webdata_handle.getDataUnits.__doc__
    getGMLIDs.__doc__ = shapefile_id_handle.getGMLIDs.__doc__
    getShapefiles.__doc__ = shapefile_value_handle.getShapefiles.__doc__
    getTimeRange.__doc__ = webdata_handle.getTimeRange.__doc__
    getTuples.__doc__ = shapefile_id_handle.getTuples.__doc__
    getValues.__doc__ = shapefile_value_handle.getValues.__doc__
    shapeToZip.__doc__ = shape_to_zip.shapeToZip.__doc__
    submitFeatureCategoricalGridCoverage.__doc__ = feature_coverage.submitFeatureCategoricalGridCoverage.__doc__
    submitFeatureCoverageOPenDAP.__doc__ = feature_coverage.submitFeatureCoverageOPenDAP.__doc__
    submitFeatureCoverageWCSIntersection.__doc__ = feature_coverage.submitFeatureCoverageWCSIntersection.__doc__
    submitFeatureWeightedGridStatistics.__doc__ = fwgs.submitFeatureWeightedGridStatistics.__doc__
    uploadShapeFile.__doc__ = upload_shapefile.uploadShapeFile.__doc__
class BirdyCLI(click.MultiCommand):
    """BirdyCLI is an implementation of :class:`click.MultiCommand`. It
    adds each process of a Web Processing Service as command to the
    command-line interface.

    :param url: URL of the Web Processing Service.
    :param xml: A WPS GetCapabilities response for testing.
    """

    def __init__(self, name=None, url=None, xml=None, **attrs):
        click.MultiCommand.__init__(self, name, **attrs)
        # The WPS_SERVICE environment variable takes precedence over `url`.
        self.url = os.environ.get('WPS_SERVICE') or url
        self.verify = get_ssl_verify()
        self.xml = xml
        # skip_caps: capabilities are fetched lazily in _update_commands.
        self.wps = WebProcessingService(self.url, verify=self.verify,
                                        skip_caps=True)
        self.commands = OrderedDict()

    def _update_commands(self):
        """Populate ``self.commands`` from GetCapabilities (once, lazily)."""
        if not self.commands:
            try:
                self.wps.getcapabilities(xml=self.xml)
            except SSLError:
                # NOTE: 'verfication' typo is in the user-facing message;
                # kept as-is here since this edit only touches comments.
                raise ConnectionError('SSL verfication of server certificate failed. Set WPS_SSL_VERIFY=false.')
            except Exception:
                raise ConnectionError("Web Processing Service not available.")
            for process in self.wps.processes:
                self.commands[process.identifier] = dict(
                    name=process.identifier,
                    url=self.wps.url,
                    version=process.processVersion,
                    help=BirdyCLI.format_command_help(process),
                    options=[])

    def list_commands(self, ctx):
        # Flag on ctx.obj so get_command can tell a listing from a real call.
        ctx.obj = True
        self._update_commands()
        return self.commands.keys()

    def get_command(self, ctx, name):
        """Render the command template for `name` and return the click command."""
        self._update_commands()
        cmd_templ = template_env.get_template('cmd.py.j2')
        # details is True only when list_commands has NOT run (ctx.obj is None).
        rendered_cmd = cmd_templ.render(self._get_command_info(name, details=ctx.obj is None or False))
        ns = {}
        # Execute the generated module source; it must define `cli`.
        code = compile(rendered_cmd, filename='<string>', mode='exec')
        eval(code, ns, ns)
        return ns['cli']

    def _get_command_info(self, name, details=False):
        cmd = self.commands.get(name)
        if details:
            # DescribeProcess round-trip to fill in per-input options.
            pp = self.wps.describeprocess(name)
            for inp in pp.dataInputs:
                help = inp.title or ''
                default = BirdyCLI.get_param_default(inp)
                if default:
                    help = "{}. Default: {}".format(help, default)
                cmd['options'].append(dict(
                    name=inp.identifier,
                    # default=BirdyCLI.get_param_default(inp),
                    help=help,
                    type=BirdyCLI.get_param_type(inp),
                    multiple=inp.maxOccurs > 1))
        return cmd

    @staticmethod
    def format_command_help(process):
        # "<title>: <abstract>" with graceful fallbacks for missing fields.
        return "{}: {}".format(process.title or process.identifier,
                               process.abstract or '')

    @staticmethod
    def get_param_default(param):
        if 'ComplexData' in param.dataType:
            # TODO: get default value of complex type
            default = None
        elif 'BoundingBoxData' in param.dataType:
            # TODO: get default value of bbox
            default = None
        else:
            default = getattr(param, 'defaultValue', None)
        return default

    @staticmethod
    def get_param_type(param):
        # Map the WPS dataType string onto the closest click parameter type.
        if param.dataType is None:
            param_type = click.STRING
        elif 'boolean' in param.dataType:
            param_type = click.BOOL
        elif 'integer' in param.dataType:
            param_type = click.INT
        elif 'float' in param.dataType:
            param_type = click.FLOAT
        elif 'ComplexData' in param.dataType:
            param_type = COMPLEX
        else:
            param_type = click.STRING
        return param_type
class pyGDPwebProcessing():
    """
    This class allows interactive calls to be made into the GDP.

    Facade over the pygdp helper modules; each public method forwards its
    arguments (plus the configured WFS url) to the matching helper function.
    """

    def __init__(self, WFS_URL=None):
        # Fall back to the package-level WFS_URL when none is supplied.
        if WFS_URL is None:
            from pygdp.namespaces import WFS_URL
        wfsUrl = WFS_URL
        self.wfsUrl = wfsUrl
        self.wpsUrl = WPS_URL
        self.version = '1.1.0'
        self.wps = WebProcessingService(WPS_URL)

    def WPSgetCapbilities(self, xml=None):
        """
        Returns a list of capabilities.
        """
        self.wps.getcapabilities(xml)

    def WPSdescribeprocess(self, identifier, xml=None):
        """
        Returns a list describing a specific identifier/process.
        """
        self.wps.describeprocess(identifier, xml)

    #pyGDP Submit Feature
    def dodsReplace(self, dataSetURI, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return _execute_request.dodsReplace(dataSetURI, verbose)

    def submitFeatureCoverageOPenDAP(self, geoType, dataSetURI, varID,
                                     startTime, endTime, attribute='the_geom',
                                     value=None, gmlIDs=None, verbose=False,
                                     coverage='true', outputfname=None,
                                     sleepSecs=10):
        if verbose:
            ch.setLevel(logging.INFO)
        return feature_coverage.submitFeatureCoverageOPenDAP(
            geoType, dataSetURI, varID, startTime, endTime, attribute, value,
            gmlIDs, verbose, coverage, self.wfsUrl, outputfname, sleepSecs)

    def submitFeatureCoverageWCSIntersection(self, geoType, dataSetURI, varID,
                                             attribute='the_geom', value=None,
                                             gmlIDs=None, verbose=False,
                                             coverage='true', outputfname=None,
                                             sleepSecs=10):
        if verbose:
            ch.setLevel(logging.INFO)
        return feature_coverage.submitFeatureCoverageWCSIntersection(
            geoType, dataSetURI, varID, attribute, value, gmlIDs, verbose,
            coverage, self.wfsUrl, outputfname, sleepSecs)

    def submitFeatureCategoricalGridCoverage(self, geoType, dataSetURI, varID,
                                             attribute='the_geom', value=None,
                                             gmlIDs=None, verbose=False,
                                             coverage='true', delim='COMMA',
                                             outputfname=None, sleepSecs=10):
        if verbose:
            ch.setLevel(logging.INFO)
        return feature_coverage.submitFeatureCategoricalGridCoverage(
            geoType, dataSetURI, varID, attribute, value, gmlIDs, verbose,
            coverage, delim, self.wfsUrl, outputfname, sleepSecs)

    def submitFeatureWeightedGridStatistics(self, geoType, dataSetURI, varID,
                                            startTime, endTime,
                                            attribute='the_geom', value=None,
                                            gmlIDs=None, verbose=None,
                                            coverage=True, delim='COMMA',
                                            stat='MEAN', grpby='STATISTIC',
                                            timeStep=False, summAttr=False,
                                            weighted=True, outputfname=None,
                                            sleepSecs=10):
        if verbose:
            ch.setLevel(logging.INFO)
        return fwgs.submitFeatureWeightedGridStatistics(
            geoType, dataSetURI, varID, startTime, endTime, attribute, value,
            gmlIDs, verbose, coverage, delim, stat, grpby, timeStep, summAttr,
            weighted, self.wfsUrl, outputfname, sleepSecs)

    #pyGDP File Utilities
    def shapeToZip(self, inShape, outZip=None, allFiles=True):
        # BUG FIX: previously hard-coded outZip=None, allFiles=True, silently
        # discarding the caller's arguments; forward them instead.
        return shape_to_zip.shapeToZip(inShape, outZip=outZip, allFiles=allFiles)

    def uploadShapeFile(self, filePath):
        value, ntuple = upload_shapefile.uploadShapefile(filePath)
        return value, ntuple

    #pyGDP WFS Utilities
    def getTuples(self, shapefile, attribute):
        return shapefile_id_handle.getTuples(shapefile, attribute)

    def getShapefiles(self):
        return shapefile_value_handle.getShapefiles(self.wfsUrl)

    def getAttributes(self, shapefile):
        return shapefile_value_handle.getAttributes(shapefile, self.wfsUrl)

    def getValues(self, shapefile, attribute, getTuples='false', limitFeatures=None):
        return shapefile_value_handle.getValues(shapefile, attribute, getTuples,
                                                limitFeatures, self.wfsUrl)

    def getGMLIDs(self, shapefile, attribute, value):
        return shapefile_id_handle.getGMLIDs(shapefile, attribute, value,
                                             WFS_URL=self.wfsUrl)

    def _getFilterID(self, tuples, value):
        return shapefile_id_handle._getFilterID(tuples, value)

    def _getFeatureCollectionGeoType(self, geoType, attribute='the_geom',
                                     value=None, gmlIDs=None):
        return _get_geotype._getFeatureCollectionGeoType(
            geoType, attribute, value, gmlIDs, self.wfsUrl)

    def _generateRequest(self, dataSetURI, algorithm, method, varID=None,
                         verbose=False):
        return _webdata_xml_generate._generateRequest(dataSetURI, algorithm,
                                                      method, varID, verbose)

    #pyGDP WebData Utilities
    def getDataLongName(self, dataSetURI, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return webdata_handle.getDataLongName(dataSetURI, verbose)

    def getDataType(self, dataSetURI, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return webdata_handle.getDataType(dataSetURI, verbose)

    def getDataUnits(self, dataSetURI, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return webdata_handle.getDataUnits(dataSetURI, verbose)

    def getDataSetURI(self, anyText='', CSWURL=CSWURL, BBox=None):
        return webdata_handle.getDataSetURI(anyText, CSWURL, BBox)

    def getTimeRange(self, dataSetURI, varID, verbose=False):
        if verbose:
            ch.setLevel(logging.INFO)
        return webdata_handle.getTimeRange(dataSetURI, varID, verbose)

    #Pull up docstrings.
    #dodsReplace.__doc__ = _execute_request.dodsReplace.__doc__
    getAttributes.__doc__ = shapefile_value_handle.getAttributes.__doc__
    getDataLongName.__doc__ = webdata_handle.getDataLongName.__doc__
    getDataSetURI.__doc__ = webdata_handle.getDataSetURI.__doc__
    getDataType.__doc__ = webdata_handle.getDataType.__doc__
    getDataUnits.__doc__ = webdata_handle.getDataUnits.__doc__
    getGMLIDs.__doc__ = shapefile_id_handle.getGMLIDs.__doc__
    getShapefiles.__doc__ = shapefile_value_handle.getShapefiles.__doc__
    getTimeRange.__doc__ = webdata_handle.getTimeRange.__doc__
    getTuples.__doc__ = shapefile_id_handle.getTuples.__doc__
    getValues.__doc__ = shapefile_value_handle.getValues.__doc__
    shapeToZip.__doc__ = shape_to_zip.shapeToZip.__doc__
    submitFeatureCategoricalGridCoverage.__doc__ = feature_coverage.submitFeatureCategoricalGridCoverage.__doc__
    submitFeatureCoverageOPenDAP.__doc__ = feature_coverage.submitFeatureCoverageOPenDAP.__doc__
    submitFeatureCoverageWCSIntersection.__doc__ = feature_coverage.submitFeatureCoverageWCSIntersection.__doc__
    submitFeatureWeightedGridStatistics.__doc__ = fwgs.submitFeatureWeightedGridStatistics.__doc__
    uploadShapeFile.__doc__ = upload_shapefile.uploadShapeFile.__doc__
def test_owslib(self): """Check that owslib can parse the processes' description.""" from owslib.wps import WebProcessingService wps = WebProcessingService(self.URL, skip_caps=True) wps.getcapabilities(xml=self.cap) wps.describeprocess("all", xml=self.desc)
# Example script (Python 2: uses `print` statements) demonstrating the USGS
# GDP WPS service via owslib.
from owslib.wps import WebProcessingService, WPSExecution, WFSFeatureCollection, WFSQuery, GMLMultiPolygonFeatureCollection, monitorExecution, printInputOutput
from owslib.util import dump

# instantiate WPS client
# setting verbose=True will print out all HTTP request and responses to standard output
verbose = False
wps = WebProcessingService(
    'http://cida.usgs.gov/climate/gdp/process/WebProcessingService',
    verbose=verbose,
    skip_caps=True)

# 1) GetCapabilities
# Submits an HTTP GET "GetCapabilities" request to the WPS service and parses the HTTP response.
wps.getcapabilities()

# alternatively, read capabilities from XML file (no live request to WPS server)
#xml = open('../tests/USGSCapabilities.xml', 'r').read()
#wps.getcapabilities(xml=xml)

# Dump the parsed service identification, operations and process list.
print 'WPS Identification type: %s' % wps.identification.type
print 'WPS Identification title: %s' % wps.identification.title
print 'WPS Identification abstract: %s' % wps.identification.abstract
for operation in wps.operations:
    print 'WPS Operation: %s' % operation.name
for process in wps.processes:
    print 'WPS Process: identifier=%s title=%s' % (process.identifier, process.title)

# 2) DescribeProcess
# Submits an HTTP GET "DescribeProcess" request to the WPS service and parses the HTTP response
class BirdyCLI(click.MultiCommand):
    """BirdyCLI is an implementation of :class:`click.MultiCommand`. It
    adds each process of a Web Processing Service as command to the
    command-line interface.

    :param url: URL of the Web Processing Service.
    :param xml: A WPS GetCapabilities response for testing.
    """

    def __init__(self, name=None, url=None, xml=None, **attrs):
        click.MultiCommand.__init__(self, name, **attrs)
        # The WPS_SERVICE environment variable takes precedence over `url`.
        self.url = os.environ.get('WPS_SERVICE') or url
        self.verify = get_ssl_verify()
        self.xml = xml
        # skip_caps: capabilities are fetched lazily in _update_commands.
        self.wps = WebProcessingService(self.url, verify=self.verify,
                                        skip_caps=True)
        self.commands = OrderedDict()

    def _update_commands(self):
        """Populate ``self.commands`` from GetCapabilities (once, lazily)."""
        if not self.commands:
            try:
                self.wps.getcapabilities(xml=self.xml)
            except SSLError:
                # NOTE: 'verfication' typo is in the user-facing message;
                # kept as-is here since this edit only touches comments.
                raise ConnectionError(
                    'SSL verfication of server certificate failed. Set WPS_SSL_VERIFY=false.'
                )
            except Exception:
                raise ConnectionError("Web Processing Service not available.")
            for process in self.wps.processes:
                self.commands[process.identifier] = dict(
                    name=process.identifier,
                    url=self.wps.url,
                    version=process.processVersion,
                    help=BirdyCLI.format_command_help(process),
                    options=[])

    def list_commands(self, ctx):
        # Flag on ctx.obj so get_command can tell a listing from a real call.
        ctx.obj = True
        self._update_commands()
        return self.commands.keys()

    def get_command(self, ctx, name):
        """Render the command template for `name` and return the click command."""
        self._update_commands()
        cmd_templ = template_env.get_template('cmd.py.j2')
        # details is True only when list_commands has NOT run (ctx.obj is None).
        rendered_cmd = cmd_templ.render(
            self._get_command_info(name, details=ctx.obj is None or False))
        ns = {}
        # Execute the generated module source; it must define `cli`.
        code = compile(rendered_cmd, filename='<string>', mode='exec')
        eval(code, ns, ns)
        return ns['cli']

    def _get_command_info(self, name, details=False):
        cmd = self.commands.get(name)
        if details:
            # DescribeProcess round-trip to fill in per-input options.
            pp = self.wps.describeprocess(name)
            for inp in pp.dataInputs:
                help = inp.title or ''
                default = BirdyCLI.get_param_default(inp)
                if default:
                    help = "{}. Default: {}".format(help, default)
                cmd['options'].append(
                    dict(
                        name=inp.identifier,
                        # default=BirdyCLI.get_param_default(inp),
                        help=help,
                        type=BirdyCLI.get_param_type(inp),
                        multiple=inp.maxOccurs > 1))
        return cmd

    @staticmethod
    def format_command_help(process):
        # "<title>: <abstract>" with graceful fallbacks for missing fields.
        return "{}: {}".format(process.title or process.identifier,
                               process.abstract or '')

    @staticmethod
    def get_param_default(param):
        if 'ComplexData' in param.dataType:
            # TODO: get default value of complex type
            default = None
        elif 'BoundingBoxData' in param.dataType:
            # TODO: get default value of bbox
            default = None
        else:
            default = getattr(param, 'defaultValue', None)
        return default

    @staticmethod
    def get_param_type(param):
        # Map the WPS dataType string onto the closest click parameter type.
        if param.dataType is None:
            param_type = click.STRING
        elif 'boolean' in param.dataType:
            param_type = click.BOOL
        elif 'integer' in param.dataType:
            param_type = click.INT
        elif 'float' in param.dataType:
            param_type = click.FLOAT
        elif 'ComplexData' in param.dataType:
            param_type = COMPLEX
        else:
            param_type = click.STRING
        return param_type