def __init__(self):
    """Declare the word-counter process: one plain-text input, one JSON output."""
    text_input = ComplexInput(
        'text',
        'Text document',
        abstract='URL pointing to text document',
        supported_formats=[Format('text/plain')],
    )
    result_output = ComplexOutput(
        'output',
        'Word counter result',
        as_reference=True,
        supported_formats=[Format('application/json')],
    )
    super(WordCounter, self).__init__(
        self._handler,
        identifier='wordcounter',
        title='Word Counter',
        abstract="Counts words in a given text.",
        version='1.0',
        inputs=[text_input],
        outputs=[result_output],
        store_supported=True,
        status_supported=True,
    )
def _handler(self, request, response):
    """Serve a pre-rendered clusters plot as the process output.

    NOTE(review): no diagnostic is actually run here — the handler only
    reports status and returns a bundled demo image via util.diagdata_file.
    """
    response.update_status("starting ...", 0)
    # run diag
    response.update_status("running diag ...", 20)
    # result plot
    response.update_status("collect output plot ...", 90)
    plot_path = os.path.join('modes_of_variability',
                             'era_interim_1990-01-2010-01_clusters.png')
    plot_output = response.outputs['output']
    plot_output.output_format = Format('image/png')
    plot_output.file = util.diagdata_file(plot_path)
    response.update_status("done.", 100)
    return response
def test_complex_output(self):
    """A GML ComplexOutput describes itself with mime 'application/gml+xml'."""
    output = ComplexOutput('complex', 'Complex foo', [Format('GML')])
    xml = output.describe_xml()
    # Single-element unpacking doubles as an existence check.
    [outpt] = xpath_ns(xml, '/Output')
    [default] = xpath_ns(
        xml, '/Output/ComplexOutput/Default/Format/MimeType')
    supported_mimes = xpath_ns(
        xml, '/Output/ComplexOutput/Supported/Format/MimeType')
    assert default.text == 'application/gml+xml'
    assert len(supported_mimes) == 1
def __init__(self):
    """Declare inputs, outputs and status milestones for the metadata updater."""
    # Percentage checkpoints reported while the process runs.
    self.status_percentage_steps = {
        "start": 0,
        "process": 10,
        "build_output": 95,
        "complete": 100,
    }
    netcdf_input = ComplexInput(
        "netcdf",
        "Daily NetCDF Dataset",
        abstract="NetCDF file",
        min_occurs=1,
        max_occurs=1,
        supported_formats=[FORMATS.NETCDF, FORMATS.DODS],
    )
    updates_file_input = ComplexInput(
        "updates_file",
        "Updates File(yaml)",
        abstract="The filepath of an updates file that specifies what to do to the metadata it finds in the NetCDF file",
        min_occurs=0,
        max_occurs=1,
        supported_formats=[
            Format(
                mime_type="text/x-yaml",
                extension=".yaml",
            )
        ],
    )
    updates_string_input = LiteralInput(
        "updates_string",
        "Updates String(yaml format)",
        abstract="The string in yaml format that specifies what to do to the metadata it finds in the NetCDF file",
        min_occurs=0,
        max_occurs=1,
        data_type="string",
    )
    super(UpdateMetadata, self).__init__(
        self._handler,
        identifier="update_metadata",
        title="Update NetCDF Metadata",
        abstract="Update file containing missing, invalid, or incorrectly named global or variable metadata attributes",
        store_supported=True,
        status_supported=True,
        inputs=[netcdf_input, updates_file_input, updates_string_input, log_level],
        outputs=[nc_output],
    )
def __init__(self):
    """Sample process demonstrating several pywps input styles.

    FIX: corrected two user-facing typos — input title 'Sentine Product IDs'
    -> 'Sentinel Product IDs' and output abstract 'Empty ouput.' ->
    'Empty output.'. No other behavior changed.
    """
    inputs = [
        LiteralInput(
            'sentinel_products',
            'Sentinel Product IDs',
            data_type='string',
            abstract="""
            helper:sentinel_product_select
            """,
        ),
        ComplexInput(
            'geojson',
            'GeoJSON region',
            supported_formats=[Format('application/vnd.geo+json')],
            abstract="GeoJson",
            mode=MODE.SIMPLE,
            max_occurs=1),
        LiteralInput(
            'ranged',
            'Ranged Value',
            abstract="""
            Sample of allowed_value usage
            """,
            data_type='integer',
            # Restrict to the inclusive range [0, 100].
            allowed_values=AllowedValue(
                allowed_type=ALLOWEDVALUETYPE.RANGE,
                minval=0,
                maxval=100),
            max_occurs=1)
    ]
    outputs = [
        LiteralOutput(
            'none',
            'Nothing',
            data_type='string',
            abstract="""
            Empty output.
            """,
        )
    ]
    super(InputDemos, self).__init__(
        identifier='input_demos',
        abstract="""
        Input demonstrators.
        """,
        version='0.1',
        title="Input Demos",
        profile='',
        metadata=[Metadata('Sample'), Metadata('Input')],
        inputs=inputs,
        outputs=outputs,
    )
def __init__(self):
    """Sample."""
    region_input = ComplexInput(
        'search_polygon',
        'GeoJSON region',
        supported_formats=[Format('application/vnd.geo+json')],
        abstract="""
        GeoJson of region to search
        """,
        mode=MODE.SIMPLE,
        max_occurs=1)
    start_input = LiteralInput(
        'start_date',
        'Start date',
        abstract="""
        Datestamp in format YYYYMMDD
        """,
        data_type='integer',
        max_occurs=1)
    end_input = LiteralInput(
        'end_date',
        'End date',
        abstract="""
        Datestamp in format YYYYMMDD
        """,
        data_type='integer',
        max_occurs=1)
    dir_output = LiteralOutput(
        'output_dir',
        'Workflow data volume path',
        data_type='string',
        abstract="""
        Path to a directory within the Workflow Data volume. The
        service will store all outputs in this dir, then provide a
        reference to the directory which other services can use.
        """,
    )
    super(SentinelDownload, self).__init__(
        identifier='acquisition:sentinel1',
        abstract="""
        Use sentinelsat python module to download Sentinel 1 data
        """,
        version='0.1',
        title="Download Sentinel 1 Data (referencing data volume)",
        metadata=[Metadata('Testing')],
        profile='',
        inputs=[region_input, start_input, end_input],
        outputs=[dir_output],
    )
def _handler(self, request, response):
    """Run the ESMValTool 'py_demo' recipe and attach recipe/log/plot/data outputs.

    FIX: the plot output's mime type was 'application/png'; the IANA-registered
    type for PNG images is 'image/png'.
    """
    response.update_status("starting ...", 0)
    # build esgf search constraints
    constraints = dict(
        model=request.inputs['model'][0].data,
        experiment=request.inputs['experiment'][0].data,
        time_frequency='mon',
        cmor_table='Amon',
        ensemble=request.inputs['ensemble'][0].data,
    )
    # generate recipe
    response.update_status("generate recipe ...", 10)
    recipe_file, config_file = runner.generate_recipe(
        workdir=self.workdir,
        diag='py_demo',
        constraints=constraints,
        start_year=request.inputs['start_year'][0].data,
        end_year=request.inputs['end_year'][0].data,
        output_format='png',
    )
    # run diag
    response.update_status("running diag ...", 20)
    logfile, plot_dir, work_dir = runner.run(recipe_file, config_file)
    # recipe output
    response.outputs['recipe'].output_format = FORMATS.TEXT
    response.outputs['recipe'].file = recipe_file
    # log output
    response.outputs['log'].output_format = FORMATS.TEXT
    response.outputs['log'].file = logfile
    # result plot
    response.update_status("collect output plot ...", 90)
    response.outputs['output'].output_format = Format('image/png')
    response.outputs['output'].file = runner.get_output(
        plot_dir,
        path_filter=os.path.join('diagnostic1', 'script1'),
        name_filter="CMIP5*",
        output_format="png")
    response.outputs['data'].output_format = FORMATS.NETCDF
    response.outputs['data'].file = runner.get_output(
        work_dir,
        path_filter=os.path.join('diagnostic1', 'script1'),
        name_filter="CMIP5*",
        output_format="nc")
    response.update_status("done.", 100)
    return response
def outputs_from_plot_names(plotlist):
    """Build one referenced PNG ComplexOutput per plot name.

    :param plotlist: iterable of plot display names (e.g. ['Taylor'])
    :return: list of ComplexOutput objects, one per name

    Idiom fix: the manual append loop is replaced by a list comprehension
    (same elements, same order).
    """
    return [
        ComplexOutput(
            '{}_plot'.format(plot.lower()),
            '{} plot'.format(plot),
            abstract='Generated {} plot of ESMValTool processing.'.format(plot),
            as_reference=True,
            supported_formats=[Format('image/png')])
        for plot in plotlist
    ]
def __init__(self):
    """Declare the centroids process: GML in, referenced GeoJSON out."""
    gml_input = ComplexInput(
        'layer', 'Layer',
        supported_formats=[Format('application/gml+xml')])
    json_output = ComplexOutput(
        'out', 'Referenced Output',
        supported_formats=[Format('application/json')])
    super(Centroids, self).__init__(
        self._handler,
        identifier='centroids',
        title='Process Centroids',
        abstract='Returns a GeoJSON \
with centroids of features from an uploaded GML.',
        inputs=[gml_input],
        outputs=[json_output],
        store_supported=True,
        status_supported=True)
def build(self):
    """Create a pywps parameter (complex or literal) from self.metadata.

    A 'format' entry selects a complex (file-like) parameter whose mime type
    is looked up on FORMATS; otherwise a literal parameter is built from the
    declared 'dataType'.
    """
    identifier = self.metadata.get('identifier')
    title = self.metadata.get('title')
    format_name = self.metadata.get('format')
    if format_name:
        known_format = getattr(FORMATS, format_name)
        return self.complex(identifier, title,
                            [Format(known_format.mime_type)])
    return self.literal(identifier, title,
                        data_type=self.metadata.get('dataType'))
def get_outputs(self, result, response):
    """Attach the annual-trend plot for each variable to the response.

    FIX: the plot outputs' mime type was 'application/png'; the
    IANA-registered type for PNG images is 'image/png'.
    """
    # result plot
    response.update_status("collecting output ...", 80)
    varlist = ['tas', 'psl', 'pr', 'sst']
    for var in varlist:
        key = "{}_trend_ann_plot".format(var.lower())
        response.outputs[key].output_format = Format('image/png')
        response.outputs[key].file = runner.get_output(
            result['work_dir'],  # Yes, it's in the work dir
            path_filter=os.path.join('diagnostic1', 'cvdp'),
            name_filter="{}.trends.ann".format(var.lower()),
            output_format="png")
def __init__(self):
    """Declare the objective-function fit-graph process (NetCDF in, two PNGs out)."""
    sims_input = ComplexInput(
        "sims",
        "NetCDF containing q_sim and q_obs for model calibration fit check.",
        abstract="Stream flow simulation time series",
        supported_formats=[FORMATS.NETCDF],
    )
    fit_graph = ComplexOutput(
        "graph_objfun_fit",
        "Figure showing the observed and simulated streamflows",
        abstract="",
        as_reference=True,
        supported_formats=(Format(mime_type="image/png"), ),
    )
    annual_fit_graph = ComplexOutput(
        "graph_objfun_annual_fit",
        "Figure showing the fit on the mean annual hydrograph.",
        abstract="",
        as_reference=True,
        supported_formats=(Format(mime_type="image/png"), ),
    )
    super(GraphObjectiveFunctionFitProcess, self).__init__(
        self._handler,
        identifier="graph_objective_function_fit",
        title="",
        version="1.0",
        abstract="",
        metadata=[],
        inputs=[sims_input],
        outputs=[fit_graph, annual_fit_graph],
        keywords=[],
        status_supported=True,
        store_supported=True,
    )
def __init__(self):
    """Declare a demo process producing N text files plus a JSON reference doc."""
    count_input = LiteralInput(
        'count', 'Number of output files',
        abstract='The number of generated output files.',
        data_type='integer',
        default='1',
        allowed_values=[1, 2, 5, 10])
    text_output = ComplexOutput(
        'output', 'Output',
        abstract='Text document with dummy content.',
        as_reference=True,
        supported_formats=[Format('text/plain')])
    reference_output = ComplexOutput(
        'reference', 'Output References',
        abstract='Document with references to produced output files.',
        as_reference=True,
        supported_formats=[Format('application/json')])
    super(MultipleOutputs, self).__init__(
        self._handler,
        identifier='multiple_outputs',
        title='Multiple Outputs',
        abstract='Produces multiple files and returns a document'
                 ' with references to these files.',
        metadata=[
            Metadata(
                'User Guide',
                'https://emu.readthedocs.io/en/latest/processes.html'
            ),  # noqa
        ],
        version='1.0',
        inputs=[count_input],
        outputs=[text_output, reference_output],
        store_supported=True,
        status_supported=True)
def __init__(self):
    """Declare the ESMValTool performance-metrics process (outputs only)."""
    # Variables and frequency the recipe operates on.
    self.variables = ['ta', 'ua', 'va', 'zg', 'hus', 'tas',
                      'ts', 'pr', 'clt', 'rlut', 'rsut']
    self.frequency = 'mon'
    rmsd_output = ComplexOutput(
        'rmsd', 'RMSD metric',
        abstract='RMSD metric.',
        as_reference=True,
        supported_formats=[Format('image/png')])
    archive_output = ComplexOutput(
        'archive', 'Archive',
        abstract='The complete output of the ESMValTool processing as an zip archive.',
        as_reference=True,
        supported_formats=[Format('application/zip')])
    super(Perfmetrics, self).__init__(
        self._handler,
        identifier="perfmetrics",
        title="Performance metrics for essential climate parameters",
        version=runner.VERSION,
        abstract="""The goal is to create a standard recipe for the calculation of performance metrics to quantify the ability of the models to reproduce the climatological mean annual cycle for selected Essential Climate Variables (ECVs) plus some additional corresponding diagnostics and plots to better understand and interpret the results.""",
        metadata=[
            Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
            Metadata('Documentation',
                     'https://esmvaltool.readthedocs.io/en/version2_development/recipes/recipe_perfmetrics.html',
                     role=util.WPS_ROLE_DOC),
            Metadata('Estimated Calculation Time', '20 Minutes'),
        ],
        inputs=[],
        outputs=[rmsd_output, archive_output, *default_outputs()],
        status_supported=True,
        store_supported=True,
    )
def __init__(self):
    """Declare the workflow-orchestration process (JSON workflow in, metalink + provenance out)."""
    workflow_input = ComplexInput(
        "workflow",
        "Workflow",
        min_occurs=1,
        max_occurs=1,
        supported_formats=[FORMATS.JSON],
    )
    metalink_output = ComplexOutput(
        "output",
        "METALINK v4 output",
        abstract="Metalink v4 document with references to NetCDF files.",
        as_reference=True,
        supported_formats=[FORMATS.META4],
    )
    prov_output = ComplexOutput(
        "prov",
        "Provenance",
        abstract="Provenance document using W3C standard.",
        as_reference=True,
        supported_formats=[FORMATS.JSON],
    )
    prov_plot_output = ComplexOutput(
        "prov_plot",
        "Provenance Diagram",
        abstract="Provenance document as diagram.",
        as_reference=True,
        supported_formats=[
            Format("image/png", extension=".png", encoding="base64")
        ],
    )
    super(Orchestrate, self).__init__(
        self._handler,
        identifier="orchestrate",
        title="Orchestrate",
        abstract="Run a workflow with combined operations. A workflow can be build using the rooki client.",
        metadata=[
            Metadata("Rooki", "https://github.com/roocs/rooki"),
        ],
        version="1.0",
        inputs=[workflow_input],
        outputs=[metalink_output, prov_output, prov_plot_output],
        store_supported=True,
        status_supported=True,
    )
def _handler(self, request, response):
    """Configure, run and collect outputs of a hydrological model run.

    Builds the model from the request, applies optional configuration
    inputs ('conf', 'rvc', 'random_numbers'), runs it on the meteo input,
    and maps model outputs onto the PyWPS response outputs.

    NOTE(review): 'rvc' and 'random_numbers' are *popped* from
    request.inputs, so later helpers (self.meteo, self.options) will not
    see them — presumably deliberate; confirm before reordering.
    """
    response.update_status(f"PyWPS process {self.identifier} started.", 0)
    # Build the model object from the request inputs.
    model = self.model(request)
    # Model configuration (zipped RV files in `conf` input)
    if "conf" in request.inputs:
        model.configure(self.get_config(request).values())
    # Initial conditions (`rvc` input)
    if "rvc" in request.inputs:
        model.resume(request.inputs.pop("rvc")[0].file)
    # Optional file of pre-drawn random numbers for reproducible runs.
    if "random_numbers" in request.inputs:
        model.config.set_rv_file(
            request.inputs.pop("random_numbers")[0].file)
    # Input data files
    ts = self.meteo(request)
    # Model options
    kwds = self.options(request)
    # Launch model with input files
    try:
        self.run(model, ts, kwds)
    except Exception as exc:
        LOGGER.exception(exc)
        err_msg = traceback.format_exc()
        # By default the error message is limited to 300 chars and strips
        # many special characters
        raise ProcessError(err_msg, max_length=len(err_msg),
                           allowed_chars=string.printable) from exc
    # Store output files name. If an output counts multiple files, they'll be zipped.
    for key in response.outputs.keys():
        val = model.outputs.get(key)
        if val is not None:
            if isinstance(response.outputs[key], LiteralOutput):
                # Literal outputs carry the value itself, as a string.
                response.outputs[key].data = str(val)
            else:
                # Complex outputs carry a file path; zipped bundles get an
                # explicit zip format so clients decode them correctly.
                response.outputs[key].file = str(val)
                if val.suffix == ".zip":
                    response.outputs[key].data_format = Format(
                        "application/zip", extension=".zip", encoding="base64")
        else:
            # Output declared but not produced by this run: leave it empty.
            response.outputs[key].data = ""
    return response
def __init__(self):
    """Declare the CliMAF global-mean time-series plotting process."""
    model_input = LiteralInput(
        'model', 'Model',
        abstract="Climate model ID",
        default='MPI-ESM-LR',
        data_type='string',
        allowed_values=ALLOWED_VALUES['model'])
    experiment_input = LiteralInput(
        'experiment', 'Experiment',
        abstract="Experiment name",
        default='rcp45',
        data_type='string',
        allowed_values=ALLOWED_VALUES['experiment'])
    variable_input = LiteralInput(
        'variable', 'Variable',
        abstract="Variable ID",
        default='tas',
        data_type='string',
        allowed_values=ALLOWED_VALUES['variable'])
    start_year_input = LiteralInput(
        'start_year', 'Start Year',
        abstract="4-digit start year",
        default='2010',
        data_type='integer')
    end_year_input = LiteralInput(
        'end_year', 'End Year',
        abstract="4-digit end year",
        default='2020',
        data_type='integer')
    plot_output = ComplexOutput(
        'output', 'Output plot',
        abstract='Generated timeseries plot.',
        as_reference=True,
        supported_formats=[Format('image/png')])
    super(TimeSeriesPlot, self).__init__(
        self._handler,
        identifier='tsplot',
        version='1.1.1',
        title='CMIP5 Global Mean Time Series',
        abstract='Uses the CliMAF tool to calculate a time series of global mean values'
                 ' for a variable, model, experiment and ensemble member from the CMIP5 archive.'
                 ' The time series is plotted as a line graph showing change '
                 ' in the global mean value against time.',
        profile='',
        metadata=[
            Metadata('CliMAF', 'http://climaf.readthedocs.io/en/latest/'),
            Metadata('Documentation',
                     'https://climaf-wps-demo.readthedocs.io/en/latest/processes.html#tsplot',
                     role=util.WPS_ROLE_DOC),
            Metadata('Media',
                     'https://climaf-wps-demo.readthedocs.io/en/latest/_static/media/tsplot_thumbnail.png',
                     role=util.WPS_ROLE_MEDIA),
        ],
        inputs=[model_input, experiment_input, variable_input,
                start_year_input, end_year_input],
        outputs=[plot_output],
        store_supported=True,
        status_supported=True
    )
def create_sum_one():
    """Build a Process that imports a raster into GRASS, adds 1, and exports it.

    NOTE(review): relies on a hard-coded GRASS 6.4 install path and on
    grass.run_command returning 0 on success — verify against the GRASS
    Python API version in use.
    """
    def sum_one(request, response):
        # Raw complex input; presumably a raster reference — TODO confirm
        # what object type request.inputs['input'] yields here.
        input = request.inputs['input']
        # What do we need to assert a Complex input?
        #assert type(input) is text_type
        sys.path.append("/usr/lib/grass64/etc/python/")
        import grass.script as grass
        # Import the raster and set the region
        if grass.run_command("r.in.gdal", flags="o", out="input",
                             input=input) != 0:
            raise NoApplicableCode(
                "Could not import cost map. Please check the WCS service.")
        if grass.run_command("g.region", flags="ap", rast="input") != 0:
            raise NoApplicableCode("Could not set GRASS region.")
        # Add 1
        # NOTE(review): this error message looks copy-pasted from the
        # g.region branch above — it reports a region failure for a
        # mapcalc failure.
        if grass.mapcalc("$output = $input + $value", output="output",
                         input="input", value=1.0) != 0:
            raise NoApplicableCode("Could not set GRASS region.")
        # Export the result
        out = "./output.tif"
        if grass.run_command("r.out.gdal", input="output",
                             type="Float32", output=out) != 0:
            raise NoApplicableCode("Could not export result from GRASS.")
        # NOTE(review): assigns the path string directly over the output
        # object rather than setting a .file attribute — confirm this is
        # what the pywps version in use expects.
        response.outputs['output'] = out
        return response
    return Process(handler=sum_one,
                   identifier='sum_one',
                   title='Process Sum One',
                   inputs=[ComplexInput('input', [Format('image/img')])],
                   outputs=[ComplexOutput('output', [Format('image/tiff')])])
def __init__(self):
    '''
    Description of inputs and outputs goes here.
    '''
    gml_input = ComplexInput(
        'layer', 'Layer',
        supported_formats=[Format('application/gml+xml')])
    gml_output = ComplexOutput(
        'out', 'Referenced Output',
        supported_formats=[Format('application/gml+xml')])
    super(MyProcess, self).__init__(
        self._handler,
        identifier='identifier',
        title='Process title',
        abstract='Process Abstract',
        inputs=[gml_input],
        outputs=[gml_output],
        store_supported=True,
        status_supported=True)
def __init__(self):
    """Declare the area process: a GML layer in, its area (string) out."""
    layer_input = ComplexInput('layer', 'Layer',
                               [Format('application/gml+xml')])
    area_output = LiteralOutput('area', 'Area', data_type='string')
    super(Area, self).__init__(
        self._handler,
        identifier='area',
        title='Process Area',
        inputs=[layer_input],
        outputs=[area_output],
        store_supported=True,
        status_supported=True
    )
def __init__(self):
    """Declare the climate fact-sheet process (NetCDF/archive + region in, tar/pdf/log out)."""
    resource_input = ComplexInput(
        'resource', 'Resource',
        abstract="NetCDF Files or archive (tar/zip) containing netCDF files",
        min_occurs=1,
        max_occurs=1000,
        # maxmegabites=5000,
        supported_formats=[
            Format('application/x-netcdf'),
            Format('application/x-tar'),
            Format('application/zip'),
        ])
    region_input = LiteralInput(
        "region", "Region",
        # abstract= countries_longname(),  # need to handle special non-ascii char in countries.
        default='DEU',
        data_type='string',
        min_occurs=1,
        max_occurs=len(countries()),
        allowed_values=countries())  # REGION_EUROPE #COUNTRIES
    ###########
    # OUTPUTS
    ###########
    nc_output = ComplexOutput(
        'output_nc', "Subsets",
        abstract="Tar archive containing the netCDF files",
        as_reference=True,
        supported_formats=[Format("application/x-tar")],
    )
    factsheet_output = ComplexOutput(
        'output_factsheet', "Climate Fact Sheet",
        abstract="Short overview of the climatological situation of the selected countries",
        as_reference=True,
        supported_formats=[Format('application/pdf')],
    )
    log_output = ComplexOutput(
        'output_log', 'Logging information',
        abstract="Collected logs during process run.",
        as_reference=True,
        supported_formats=[Format("text/plain")])
    super(FactsheetProcess, self).__init__(
        self._handler,
        identifier="climatefactsheet",
        title="Climate Fact Sheet Generator (init)",
        version="0.2",
        abstract="Returns a pdf with a short overview of the climatological situation for the selected countries",
        metadata=[
            # {"title": "LSCE", "href": "http://www.lsce.ipsl.fr/en/index.php"},
            {"title": "Doc", "href": "http://flyingpigeon.readthedocs.io/en/latest/"},
        ],
        inputs=[resource_input, region_input],
        outputs=[nc_output, factsheet_output, log_output],
        status_supported=True,
        store_supported=True
    )
def _handler(self, request, response):
    """Plot the fit between a time series and its fitted parameters.

    Reads the 'ts' and 'params' NetCDF inputs, draws the fit graph and
    saves it in the requested output format, setting the matching mime
    type on the 'graph_fit' output.

    FIX: the local variable 'format' shadowed the builtin of the same
    name; renamed to 'fmt' (no behavior change).
    """
    ts_fn = request.inputs['ts'][0].file
    p_fn = request.inputs['params'][0].file
    v = request.inputs['variable'][0].data
    fmt = request.inputs['format'][0].data

    # Create and save graphics
    ds = xr.open_dataset(ts_fn)
    if v == '':
        # Default to the first data variable when none is given.
        v = list(ds.data_vars.keys())[0]
    ts = ds[v]
    p = xr.open_dataset(p_fn)['params']  # Name of variable is hard-coded
    fig = ts_fit_graph(ts, p)

    if fmt == 'plotly':
        # This is not working great with this figure due to the twin axes.
        raise NotImplementedError
        # Create plotly object
        # obj = mpl_to_plotly(fig)
        # Convert to JSON
        # response.outputs['graph_fit'].data = obj.to_json()
        # response.outputs['graph_fit'].data_format = Format('application/json')
    else:
        fig_fn = Path(self.workdir) / ('ts_fit.' + fmt)
        fig.savefig(fig_fn, format=fmt)
        plt.close(fig)  # free the figure once written
        response.outputs['graph_fit'].file = str(fig_fn)
        if fmt in ['png', 'jpeg']:
            response.outputs['graph_fit'].data_format = Format(
                'image/{}'.format(fmt))
        elif fmt == 'pdf':
            response.outputs['graph_fit'].data_format = Format(
                'application/pdf')
    return response
def __init__(self):
    """Declare the NetCDF spatial-subset process (NetCDF + shapefile + variable in)."""
    datafile_input = ComplexInput(
        'datafile', 'NetCDF datafile',
        supported_formats=[Format('application/x-netcdf')],
        min_occurs=1,
        max_occurs=1,
        mode=MODE.STRICT)
    shapefile_input = ComplexInput(
        'shapefile',
        '.zip file representing ESRI Shapefile of geometry to use for subset',
        supported_formats=[Format('application/zip')],
        min_occurs=1,
        max_occurs=1,
        # NOTE: No validator for ZIP files
        mode=MODE.NONE)
    variable_input = LiteralInput('variable', 'Variable to subset',
                                  data_type='string')
    subset_output = ComplexOutput(
        'output', 'Output data',
        as_reference=True,
        supported_formats=[Format('text/plain')])
    super(SpatialSubsetNetcdf, self).__init__(
        self._handler,
        identifier='spatial_subset_netcdf',
        title='NetCDF data spatial subset',
        abstract="Subsets a given NetCDF dataset with given spatial data/geometry",
        version='1',
        metadata=[],
        inputs=[datafile_input, shapefile_input, variable_input],
        outputs=[subset_output],
        store_supported=True,
        status_supported=True)
def create_complex_nc_process():
    """Build a Process that reads an OPeNDAP dataset and echoes its URL.

    The handler copies the dataset's Conventions attribute to a literal
    output and forwards the OPeNDAP URL to a referenced complex output.
    """
    def complex_proces(request, response):
        from pywps.dependencies import netCDF4 as nc
        url = request.inputs['dods'][0].url
        with nc.Dataset(url) as D:
            response.outputs['conventions'].data = D.Conventions
        response.outputs['outdods'].url = url
        return response

    dods_input = ComplexInput(
        'dods',
        'Opendap input',
        supported_formats=[
            Format('DODS'),
            Format('NETCDF'),
        ],
        # mode=MODE.STRICT
    )
    conventions_output = LiteralOutput(
        'conventions',
        'NetCDF convention',
    )
    dods_output = ComplexOutput(
        'outdods', 'Opendap output',
        supported_formats=[
            Format('DODS'),
            Format('NETCDF'),
        ],
        as_reference=True)
    return Process(
        handler=complex_proces,
        identifier='my_opendap_process',
        title='Opendap process',
        inputs=[dods_input],
        outputs=[conventions_output, dods_output])
def __init__(self):
    """Declare the ensemble-uncertainty graph process (NetCDF/zip in, two PNGs out)."""
    sims_input = ComplexInput(
        'sims', 'Stream flow simulations ensemble',
        abstract='Stream flow simulation time series',
        supported_formats=[
            FORMATS.NETCDF,
            Format(mime_type='application/zip')
        ])
    hydrographs_graph = ComplexOutput(
        'graph_ensemble_hydrographs',
        'Figure showing the simple hydrographs of the included models.',
        abstract="",
        as_reference=True,
        supported_formats=(Format(mime_type='image/png'), ))
    annual_graph = ComplexOutput(
        'graph_annual_hydrographs',
        'Figure showing the spread for the mean annual hydrograph.',
        abstract="",
        as_reference=True,
        supported_formats=(Format(mime_type='image/png'), ))
    super(GraphEnsUncertaintyProcess, self).__init__(
        self._handler,
        identifier="graph_ensemble_uncertainty",
        title="",
        version="1.0",
        abstract="",
        metadata=[],
        inputs=[sims_input],
        outputs=[hydrographs_graph, annual_graph],
        keywords=[],
        status_supported=True,
        store_supported=True)
def __init__(self):
    """Declare the buffer process: a GML polygon plus a buffer distance in, GML out."""
    poly_input = ComplexInput(
        'poly_in', 'Input1',
        supported_formats=[Format('application/gml+xml')],
        mode=MODE.STRICT)
    buffer_input = LiteralInput(
        'buffer', 'Buffer',
        data_type='float',
        # Discrete values plus two (min, max, step)-style ranges.
        allowed_values=(0, 1, 10, (10, 10, 100), (100, 100, 1000)))
    buffered_output = ComplexOutput(
        'buff_out', 'Buffered',
        supported_formats=[Format('application/gml+xml')])
    super(Buffer, self).__init__(
        self._handler,
        identifier='buffer',
        version='0.1',
        title="Brauni's 1st process",
        abstract='This process is the best ever being coded',
        profile='',
        # wsdl='',
        metadata=['Process', '1st', 'Hilarious'],
        inputs=[poly_input, buffer_input],
        outputs=[buffered_output],
        store_supported=True,
        status_supported=True
    )
def test_complex_output(self):
    """A GML ComplexOutput with keywords describes both format and ows:Keywords."""
    output = ComplexOutput('complex', 'Complex foo', [Format('GML')],
                           keywords=['kw1', 'kw2'])
    xml = output.describe_xml()
    [outpt] = xpath_ns(xml, '/Output')
    [default] = xpath_ns(
        xml, '/Output/ComplexOutput/Default/Format/MimeType')
    supported_mimes = xpath_ns(
        xml, '/Output/ComplexOutput/Supported/Format/MimeType')
    assert default.text == 'application/gml+xml'
    assert len(supported_mimes) == 1
    # Both declared keywords must appear under a single ows:Keywords node.
    [keywords] = xpath_ns(xml, '/Output/ows:Keywords')
    kws = xpath_ns(keywords, './ows:Keyword')
    assert keywords is not None
    assert len(kws) == 2
def __init__(self):
    """Declare the L1D processor: GeoTIFF MDG in, WCS-referenced GTC out."""
    mdg_input = ComplexInput(
        'MDG', 'MDG',
        [Format('image/tiff; subtype=geotiff')])
    gtc_output = ComplexOutput(
        'GTC', '',
        [Format('application/x-ogc-wcs; version=2.0')])
    super(L1D, self).__init__(
        self._handler,
        identifier='L1D',
        version='None',
        title='L1D Processor',
        abstract='',
        profile='',
        metadata=[Metadata('Level L1D'), Metadata('Processor')],
        inputs=[mdg_input],
        outputs=[gtc_output],
        store_supported=True,
        status_supported=True)
def get_outputs(self, result, response):
    """Attach SPI/SPEI histogram plots and model/reference NetCDF data to the response.

    FIX: both plot outputs used mime type 'application/png'; the
    IANA-registered type for PNG images is 'image/png'.
    NOTE(review): the name filters spell 'CMPI5' rather than 'CMIP5'
    (compare the other handlers in this file) — left unchanged here in
    case the produced filenames really use that spelling; verify.
    """
    # result plot
    response.update_status("collecting output ...", 80)
    response.outputs['spi_plot'].output_format = Format('image/png')
    response.outputs['spi_plot'].file = runner.get_output(
        result['plot_dir'],
        path_filter=os.path.join('diagnostic', 'spi'),
        name_filter="histplot",
        output_format="png")
    response.outputs['spei_plot'].output_format = Format('image/png')
    response.outputs['spei_plot'].file = runner.get_output(
        result['plot_dir'],
        path_filter=os.path.join('diagnostic', 'spei'),
        name_filter="histplot",
        output_format="png")
    response.outputs['spi_model'].output_format = FORMATS.NETCDF
    response.outputs['spi_model'].file = runner.get_output(
        result['work_dir'],
        path_filter=os.path.join('diagnostic', 'spi'),
        name_filter="CMPI5*spi*",
        output_format="nc")
    response.outputs['spi_reference'].output_format = FORMATS.NETCDF
    response.outputs['spi_reference'].file = runner.get_output(
        result['work_dir'],
        path_filter=os.path.join('diagnostic', 'spi'),
        name_filter="OBS*spi*",
        output_format="nc")
    response.outputs['spei_model'].output_format = FORMATS.NETCDF
    response.outputs['spei_model'].file = runner.get_output(
        result['work_dir'],
        path_filter=os.path.join('diagnostic', 'spei'),
        name_filter="CMPI5*spei*",
        output_format="nc")
    response.outputs['spei_reference'].output_format = FORMATS.NETCDF
    response.outputs['spei_reference'].file = runner.get_output(
        result['work_dir'],
        path_filter=os.path.join('diagnostic', 'spei'),
        name_filter="OBS*spei*",
        output_format="nc")
def __init__(self):
    """Declare the simple-plot process: NetCDF + text + variable in, PNG + text out."""
    dataset_input = ComplexInput(
        'dataset', 'Dataset',
        supported_formats=[Format('application/x-netcdf')],
        default=AIR_DS,
        abstract='Example: {0}'.format(AIR_DS))
    text_input = ComplexInput(
        'text', 'Text',
        supported_formats=[Format('text/plain')])
    variable_input = LiteralInput(
        'variable', 'Variable',
        data_type='string',
        default='air',
        abstract='Enter the variable name.')
    plot_output = ComplexOutput(
        'output', 'Simple Plot',
        supported_formats=[Format('image/png')],
        as_reference=True)
    text_output = ComplexOutput(
        'text', 'Text',
        supported_formats=[Format('text/plain')],
        as_reference=True)
    super(SimplePlot, self).__init__(
        self._handler,
        identifier='simple_plot',
        title='Simple Plot',
        abstract='Returns a nice and simple plot.',
        version='1.0',
        inputs=[dataset_input, text_input, variable_input],
        outputs=[plot_output, text_output],
        store_supported=True,
        status_supported=True)