def create_mimetype_process():
    """Build a test process that reports the mime type of its own output.

    The handler simply echoes back the mime type that was negotiated for
    the 'mimetype' output, which lets tests verify format negotiation.
    """

    def _handler(request, response):
        # Write the negotiated mime type into the output's data payload.
        out = response.outputs['mimetype']
        out.data = out.data_format.mime_type
        return response

    plain = Format(mime_type='text/plain')
    plain_test = Format(mime_type='text/plain+test')
    return Process(
        handler=_handler,
        identifier='get_mimetype_process',
        title='Get mimeType process',
        inputs=[],
        outputs=[
            ComplexOutput('mimetype',
                          'mimetype of requested output',
                          supported_formats=[plain, plain_test]),
        ])
def test_complex_input_default_and_supported(self):
    """The first supported format is advertised as the default format."""
    complex_in = ComplexInput(
        'foo',
        'Complex foo',
        supported_formats=[Format('a/b'), Format('c/d')],
    )
    doc = complex_in.describe_xml()

    # The default format must be the first declared supported format.
    [default_fmt_el] = xpath_ns(doc, './ComplexData/Default/Format')
    [default_mime] = xpath_ns(default_fmt_el, './MimeType')
    self.assertEqual(default_mime.text, 'a/b')

    # Every supported format appears, in declaration order.
    supported_mime_types = []
    for fmt_el in xpath_ns(doc, './ComplexData/Supported/Format'):
        [mime_el] = xpath_ns(fmt_el, './MimeType')
        supported_mime_types.append(mime_el.text)
    self.assertEqual(supported_mime_types, ['a/b', 'c/d'])
def _handler(self, request, response):
    """Run the capacity_factor_wp7 ESMValTool recipe and attach its outputs.

    Generates a recipe for fixed years 1980-2005, runs the diagnostic, and
    fills the recipe/log/debug_log/success/archive outputs on the response.
    """
    response.update_status("starting ...", 0)

    # build esgf search constraints
    constraints = dict()
    options = dict()

    # generate recipe
    response.update_status("generate recipe ...", 10)
    recipe_file, config_file = runner.generate_recipe(
        workdir=self.workdir,
        diag='capacity_factor_wp7',
        constraints=constraints,
        options=options,
        start_year=1980,
        end_year=2005,
        output_format='png',
    )

    # recipe output
    response.outputs['recipe'].output_format = FORMATS.TEXT
    response.outputs['recipe'].file = recipe_file

    # run diag
    response.update_status("running diagnostic ...", 20)
    result = runner.run(recipe_file, config_file)
    response.outputs['success'].data = result['success']

    # log output
    response.outputs['log'].output_format = FORMATS.TEXT
    response.outputs['log'].file = result['logfile']

    # debug log output
    response.outputs['debug_log'].output_format = FORMATS.TEXT
    response.outputs['debug_log'].file = result['debug_logfile']

    if not result['success']:
        # BUG FIX: LOGGER.exception() is only meaningful inside an `except`
        # block (it logs the active traceback; here it would log
        # "NoneType: None"). Use error() for a plain failure message.
        LOGGER.error('esmvaltool failed!')
        response.update_status("exception occured: " + result['exception'], 100)
        return response

    # Collect diagnostic outputs; a failure here is reported but does not
    # prevent the archive from being produced below (best-effort).
    try:
        self.get_outputs(result, response)
    except Exception as e:
        response.update_status("exception occured: " + str(e), 85)

    response.update_status("creating archive of diagnostic result ...", 90)
    response.outputs['archive'].output_format = Format('application/zip')
    response.outputs['archive'].file = runner.compress_output(
        os.path.join(self.workdir, 'output'), 'diagnostic_result.zip')
    response.update_status("done.", 100)
    return response
def create_complex_nc_process():
    """Build a test process exercising OPeNDAP complex input and output."""

    def _opendap_handler(request, response):
        from pywps.dependencies import netCDF4 as nc
        url = request.inputs['dods'][0].url
        with nc.Dataset(url) as dataset:
            # Report the dataset's Conventions attribute and pass the
            # OPeNDAP URL through as a referenced output.
            response.outputs['conventions'].data = dataset.Conventions
            response.outputs['outdods'].url = url
            response.outputs['ncraw'].file = os.path.join(
                DATA_DIR, 'netcdf', 'time.nc')
            response.outputs['ncraw'].data_format = FORMATS.NETCDF
        return response

    dods_input = ComplexInput(
        'dods',
        'Opendap input',
        supported_formats=[Format('DODS'), Format('NETCDF')],
        # mode=MODE.STRICT
    )
    conventions_output = LiteralOutput(
        'conventions',
        'NetCDF convention',
    )
    dods_output = ComplexOutput('outdods', 'Opendap output',
                                supported_formats=[
                                    FORMATS.DODS,
                                ],
                                as_reference=True)
    raw_output = ComplexOutput('ncraw', 'NetCDF raw data output',
                               supported_formats=[
                                   FORMATS.NETCDF,
                               ],
                               as_reference=False)
    return Process(
        handler=_opendap_handler,
        identifier='my_opendap_process',
        title='Opendap process',
        inputs=[dods_input],
        outputs=[conventions_output, dods_output, raw_output])
def __init__(self):
    """Declare inputs/outputs and register the ESGF demo process."""
    dataset_input = ComplexInput(
        'dataset',
        'Dataset',
        abstract='You may provide a URL or upload a NetCDF file.',
        min_occurs=0,
        max_occurs=100,
        supported_formats=[Format('application/x-netcdf')])
    opendap_input = LiteralInput(
        'dataset_opendap',
        'Remote OpenDAP Data URL',
        data_type='string',
        abstract="Or provide a remote OpenDAP data URL,"
                 " for example:"
                 " http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis2.dailyavgs/surface/mslp.2016.nc",  # noqa
        metadata=[
            Metadata(
                'application/x-ogc-dods',
                'https://www.iana.org/assignments/media-types/media-types.xhtml')
        ],
        min_occurs=0,
        max_occurs=100)
    summary_output = LiteralOutput('output',
                                   'Output response',
                                   abstract='A summary report.',
                                   data_type='string')
    super(ESGFDemo, self).__init__(
        self._handler,
        identifier='esgf_demo',
        title='ESGF Demo',
        abstract='Shows how to use WPS metadata for processes using ESGF data.',
        metadata=[
            Metadata('User Guide',
                     'https://emu.readthedocs.io/en/latest/processes.html'),  # noqa
            Metadata(
                'ESGF Constraints',
                role='https://www.earthsystemcog.org/spec/esgf_search/4.12.0/def/constraints',  # noqa
                href='http://esgf-data.dkrz.de/esg-search/search?project=CMIP5&time_frequency=mon&variable=tas,tasmax,tasmin&experiment=historical'),  # noqa
        ],
        version='1.0',
        inputs=[dataset_input, opendap_input],
        outputs=[summary_output],
        store_supported=True,
        status_supported=True)
def __init__(self):
    """Declare inputs/outputs and register the density-plot process."""
    csv_input = ComplexInput(
        'csv',
        'Data in CSV format, with variables in the first row',
        supported_formats=[Format('text/csv')],
        min_occurs=1,
        max_occurs=1,
        # There is no CSV validator, so we have to use None
        mode=MODE.NONE)
    variable_input = LiteralInput('variable',
                                  'Variable to plot',
                                  data_type='string',
                                  min_occurs=1,
                                  max_occurs=4,
                                  mode=MODE.SIMPLE)
    title_input = LiteralInput('title',
                               'Title of plot',
                               data_type='string',
                               min_occurs=0,
                               max_occurs=1,
                               mode=MODE.SIMPLE)
    plot_output = ComplexOutput('output',
                                'Output data',
                                as_reference=True,
                                supported_formats=[Format('image/png')])
    super(ExploratoryDataDensity, self).__init__(
        self._handler,
        identifier='exploratory_data_density',
        title='Exploratory data: Density plot',
        abstract='Generates density plots from one or more variables in the provided CSV dataset',
        version='1',
        metadata=[],
        inputs=[csv_input, variable_input, title_input],
        outputs=[plot_output],
        store_supported=True,
        status_supported=True)
def get_outputs(self, result, response):
    """Attach the generated extreme-events plots to the WPS response.

    One PNG output per entry in ``self.plotlist``, keyed '<plot>_plot'.
    """
    response.update_status("collecting output ...", 80)
    for plot, _ in self.plotlist:
        key = '{}_plot'.format(plot.lower())
        # BUG FIX: the IANA-registered media type for PNG images is
        # 'image/png'; 'application/png' is not a registered type.
        response.outputs[key].output_format = Format('image/png')
        response.outputs[key].file = runner.get_output(
            result['plot_dir'],
            path_filter=os.path.join('extreme_events', 'main'),
            name_filter="{}*".format(plot),
            output_format="png")
def __init__(self):
    """Register the centroids process with a GML input and JSON output."""
    layer_input = ComplexInput(
        'layer', 'Layer',
        supported_formats=[Format('application/gml+xml')])
    json_output = ComplexOutput(
        'out', 'Referenced Output',
        supported_formats=[Format('application/json')])
    super(Centroids, self).__init__(
        self._handler,
        identifier='centroids',
        title='Process Centroids',
        inputs=[layer_input],
        outputs=[json_output],
        store_supported=True,
        status_supported=True)
def test_complex_output(self):
    """A GML ComplexOutput advertises application/gml+xml as its default."""
    gml_output = ComplexOutput('complex', 'Complex foo', [Format('GML')])
    doc = gml_output.describe_xml()
    [output_el] = xpath_ns(doc, '/Output')
    [default_mime] = xpath_ns(
        doc, '/Output/ComplexOutput/Default/Format/MimeType')
    supported_mimes = xpath_ns(
        doc, '/Output/ComplexOutput/Supported/Format/MimeType')
    # 'GML' resolves to the registered GML media type.
    assert default_mime.text == 'application/gml+xml'
    assert len(supported_mimes) == 1
def get_outputs(self, result, response):
    """Attach SPI/SPEI histogram plots and NetCDF data to the response.

    Plots come from ``result['plot_dir']``; model/reference NetCDF files
    come from ``result['work_dir']``.
    """
    # result plot
    response.update_status("collecting output ...", 80)

    # BUG FIX: the IANA-registered media type for PNG images is
    # 'image/png'; 'application/png' is not a registered type.
    for key, diag in (('spi_plot', 'spi'), ('spei_plot', 'spei')):
        response.outputs[key].output_format = Format('image/png')
        response.outputs[key].file = runner.get_output(
            result['plot_dir'],
            path_filter=os.path.join('diagnostic', diag),
            name_filter="histplot",
            output_format="png")

    # NetCDF outputs: (output key, diagnostic subdir, file-name filter).
    # NOTE(review): 'CMPI5' looks like a typo for 'CMIP5' but must match the
    # actual file names produced by the diagnostic — confirm before changing.
    nc_specs = [
        ('spi_model', 'spi', "CMPI5*spi*"),
        ('spi_reference', 'spi', "OBS*spi*"),
        ('spei_model', 'spei', "CMPI5*spei*"),
        ('spei_reference', 'spei', "OBS*spei*"),
    ]
    for key, diag, pattern in nc_specs:
        response.outputs[key].output_format = FORMATS.NETCDF
        response.outputs[key].file = runner.get_output(
            result['work_dir'],
            path_filter=os.path.join('diagnostic', diag),
            name_filter=pattern,
            output_format="nc")
def __init__(self):
    """Register the ensemble-uncertainty hydrograph graphing process."""
    sims_input = ComplexInput(
        "sims",
        "Stream flow simulations ensemble",
        abstract="Stream flow simulation time series",
        supported_formats=[
            FORMATS.NETCDF,
            Format(mime_type="application/zip"),
        ],
    )
    ensemble_graph = ComplexOutput(
        "graph_ensemble_hydrographs",
        "Figure showing the simple hydrographs of the included models.",
        abstract="",
        as_reference=True,
        supported_formats=(Format(mime_type="image/png"), ),
    )
    annual_graph = ComplexOutput(
        "graph_annual_hydrographs",
        "Figure showing the spread for the mean annual hydrograph.",
        abstract="",
        as_reference=True,
        supported_formats=(Format(mime_type="image/png"), ),
    )
    super(GraphEnsUncertaintyProcess, self).__init__(
        self._handler,
        identifier="graph_ensemble_uncertainty",
        title="",
        version="1.0",
        abstract="",
        metadata=[],
        inputs=[sims_input],
        outputs=[ensemble_graph, annual_graph],
        keywords=[],
        status_supported=True,
        store_supported=True,
    )
def __init__(self):
    """Register the fetch_resources process (downloads data locally)."""
    inputs = [
        ComplexInput('resource', 'Resource',
                     abstract="NetCDF Files or archive (tar/zip) containing netCDF files.",
                     min_occurs=1,
                     max_occurs=1000,
                     # maxmegabytes=5000,
                     supported_formats=[Format('application/x-netcdf'),
                                        Format('application/x-tar'),
                                        Format('application/zip'),
                                        ]
                     )
    ]
    outputs = [
        ComplexOutput("output", "Fetched Files",
                      # BUG FIX: user-facing typos corrected
                      # ("pathes to downloades files").
                      abstract="File containing the local paths to downloaded files.",
                      supported_formats=[Format('text/plain')],
                      as_reference=True,
                      ),
        ComplexOutput("output_log", "Logging information",
                      abstract="Collected logs during process run.",
                      supported_formats=[Format("text/plain")],
                      as_reference=True,
                      )
    ]
    super(FetchProcess, self).__init__(
        self._handler,
        identifier="fetch_resources",
        title="Fetch Resources",
        version="0.10",
        abstract="Fetch data resources (limited to 50GB) to the local file"
                 "system of the birdhouse compute provider.",
        metadata=[
            Metadata('Documentation', 'http://flyingpigeon.readthedocs.io/en/latest/'),
        ],
        inputs=inputs,
        outputs=outputs,
        status_supported=True,
        store_supported=True,
    )
def get_outputs(self, result, response):
    """Attach the SMPI collect plot to the WPS response."""
    # result plot
    response.update_status("collecting output ...", 80)
    # BUG FIX: the IANA-registered media type for PNG images is
    # 'image/png'; 'application/png' is not a registered type.
    response.outputs['smpi'].output_format = Format('image/png')
    response.outputs['smpi'].file = runner.get_output(
        result['plot_dir'],
        path_filter=os.path.join('collect', 'SMPI'),
        name_filter="SMPI",
        output_format="png",
    )
def __init__(self):
    """Register the ESMValTool performance-metrics (perfmetrics) process."""
    # Climate variables covered by the standard perfmetrics recipe.
    self.variables = ['ta', 'ua', 'va', 'zg', 'hus', 'tas', 'ts', 'pr',
                      'clt', 'rlut', 'rsut']
    # Monthly-mean data only.
    self.frequency = 'mon'
    # No user-configurable inputs: the recipe is intentionally fixed
    # (see the abstract below).
    inputs = []
    outputs = [
        ComplexOutput('rmsd', 'RMSD metric',
                      abstract='RMSD metric.',
                      as_reference=True,
                      supported_formats=[Format('image/png')]),
        ComplexOutput('archive', 'Archive',
                      abstract='The complete output of the ESMValTool processing as an zip archive.',
                      as_reference=True,
                      supported_formats=[Format('application/zip')]),
        *default_outputs(),
    ]
    super(Perfmetrics, self).__init__(
        self._handler,
        identifier="perfmetrics",
        title="Performance metrics for essential climate parameters",
        version=runner.VERSION,
        abstract="""The goal is to create a standard recipe for the calculation of performance metrics to quantify the ability of the models to reproduce the climatological mean annual cycle for selected Essential Climate Variables (ECVs) plus some additional corresponding diagnostics and plots to better understand and interpret the results. As this process is meant to provide an authoritative answer, changes of settings by users is not supported. Running of this metric is provided as a means to verify the supplied output, and to re-calculate the metric in case the underlying datasets have been updated. The estimated calculation time of this process is 20 minutes.""",
        metadata=[
            Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
            Metadata('Documentation',
                     'https://esmvaltool.readthedocs.io/en/v2.0a2/recipes/recipe_perfmetrics.html',
                     role=util.WPS_ROLE_DOC),
        ],
        inputs=inputs,
        outputs=outputs,
        status_supported=True,
        store_supported=True,
    )
def create_sum_one():
    """Build a GRASS-backed test process that adds 1 to a raster input."""

    def sum_one(request, response):
        # Path of the uploaded/referenced raster file.
        input = request.inputs['input'][0].file
        # What do we need to assert a Complex input?
        # assert type(input) is text_type
        import grass.script as grass
        # Import the raster and set the region
        if grass.run_command(
                "r.in.gdal",
                flags="o",
                out="input",
                input=input,
                quiet=True) != 0:
            raise NoApplicableCode("Could not import cost map. "
                                   "Please check the WCS service.")
        if grass.run_command("g.region", flags="a", rast="input") != 0:
            raise NoApplicableCode("Could not set GRASS region.")
        # Add 1
        # NOTE(review): this checks truthiness while the run_command calls
        # above compare against 0 — presumably mapcalc returns a nonzero
        # value on failure; confirm against grass.script docs.
        if grass.mapcalc("$output = $input + $value",
                         output="output",
                         input="input",
                         value=1.0,
                         quiet=True):
            raise NoApplicableCode("Could not use GRASS map calculator.")
        # Export the result
        _, out = tempfile.mkstemp()
        # Silence GRASS while exporting; restored (deleted) afterwards.
        os.environ['GRASS_VERBOSE'] = '-1'
        if grass.run_command("r.out.gdal",
                             flags="f",
                             input="output",
                             type="UInt16",
                             output=out,
                             overwrite=True) != 0:
            raise NoApplicableCode("Could not export result from GRASS.")
        del os.environ['GRASS_VERBOSE']
        response.outputs['output'].file = out
        return response

    return Process(handler=sum_one,
                   identifier='sum_one',
                   title='Process Sum One',
                   inputs=[
                       ComplexInput('input',
                                    title='Input',
                                    supported_formats=[Format('image/img')])
                   ],
                   outputs=[
                       ComplexOutput('output',
                                     title='Output',
                                     supported_formats=[get_format('GEOTIFF')])
                   ],
                   grass_location='epsg:4326')
def __init__(self):
    """Register the Tropical Nights index process."""
    inputs = [
        LiteralInput(
            'dataset',
            'Dataset',
            data_type='string',
            default='/usr/local/ophidia/ECAS/data/repository/E63_kp40_198001_199012_T2m_daymin_merged.nc'
        ),
        # LiteralInput('model', 'Model',
        #              default='HadGEM2-ES', data_type='string',
        #              allowed_values=ALLOWED_VALUES['model']),
        # LiteralInput('experiment', 'Experiment',
        #              default='rcp45', data_type='string',
        #              allowed_values=ALLOWED_VALUES['experiment']),
        # LiteralInput('start_year', 'Start Year',
        #              default='2010', data_type='integer'),
        # LiteralInput('end_year', 'End Year',
        #              default='2020', data_type='integer'),
    ]
    outputs = [
        ComplexOutput('output', 'Output plot',
                      # BUG FIX: user-facing typo 'Trophical' -> 'Tropical'.
                      abstract='Map of Tropical Nights',
                      as_reference=True,
                      supported_formats=[Format('image/png')])
    ]
    super(TropicalNights, self).__init__(
        self._handler,
        identifier='tropical_nights',
        version='1.0',
        title='Tropical Nights',
        abstract='Computes the Tropical Nights index: '
                 'starting from the daily minimum temperature (1980-1990) TN, '
                 'the Tropical Nights index is the number of days where TN > T '
                 '(T is a reference temperature, e.g. 20 degree celsius)',
        profile='',
        metadata=[
            Metadata('ECASLab', 'https://ecaslab.dkrz.de/home.html'),
            Metadata('ECASLab Notebooks',
                     'https://github.com/ECAS-Lab/ecas-notebooks'),
            Metadata(
                'Documentation',
                'https://ecas-wps-demo.readthedocs.io/en/latest/processes.html#index_tn',
                role=util.WPS_ROLE_DOC),
            Metadata(
                'Media',
                'https://ecas-wps-demo.readthedocs.io/en/latest/_static/media/tropical_nights_thumbnail.png',
                role=util.WPS_ROLE_MEDIA),
        ],
        inputs=inputs,
        outputs=outputs,
        store_supported=True,
        status_supported=True)
def _handler(self, request, response):
    """Plot the fitted distribution over the time series and save the figure."""
    ts_fn = request.inputs["ts"][0].file
    p_fn = request.inputs["params"][0].file
    if "variable" in request.inputs:
        var_name = request.inputs["variable"][0].data
    else:
        var_name = None
    # Renamed from 'format' to avoid shadowing the builtin.
    fmt = request.inputs["format"][0].data

    # Create and save graphics
    ds = xr.open_dataset(ts_fn)
    if var_name is None:
        # Default to the first data variable in the dataset.
        var_name = list(ds.data_vars.keys())[0]
    ts = ds[var_name]

    # Name of variable is hard-coded
    params = xr.open_dataset(p_fn)["params"]

    fig = ts_fit_graph(ts, params)

    if fmt == "plotly":
        # This is not working great with this figure due to the twin axes.
        raise NotImplementedError
        # Create plotly object
        # obj = mpl_to_plotly(fig)
        # Convert to JSON
        # response.outputs['graph_fit'].data = obj.to_json()
        # response.outputs['graph_fit'].data_format = Format('application/json')
    else:
        fig_fn = Path(self.workdir) / ("ts_fit." + fmt)
        fig.savefig(fig_fn, format=fmt)
        plt.close(fig)
        response.outputs["graph_fit"].file = str(fig_fn)
        if fmt in ("png", "jpeg"):
            response.outputs["graph_fit"].data_format = Format(
                "image/{}".format(fmt))
        elif fmt == "pdf":
            response.outputs["graph_fit"].data_format = Format(
                "application/pdf")
    return response
def __init__(self):
    """Register the ANUClim daily climate data extract process."""
    variables_input = LiteralInput(
        'variables',
        'Variables to extract',
        data_type='string',
        min_occurs=1,
        max_occurs=len(data),
        mode=MODE.SIMPLE,
        allowed_values=list(data.keys()),
    )
    csv_input = ComplexInput(
        'csv',
        'CSV occurrences with date',
        supported_formats=[Format('text/csv')],
        min_occurs=1,
        max_occurs=1,
        # There is no CSV validator, so we have to use None
        mode=MODE.NONE)
    metadata_output = ComplexOutput(
        'output',
        'Metadata',
        as_reference=True,
        supported_formats=[Format('text/csv')])
    super(ANUClimDailyExtract, self).__init__(
        self._handler,
        identifier='anuclim_daily_extract',
        title='ANUClim daily climate data extract.',
        abstract="Extracts env variables at specific location and time from ANUClimate daily climate grids.",
        version='1',
        metadata=[],
        inputs=[variables_input, csv_input],
        outputs=[metadata_output],
        store_supported=True,
        # TODO: birdy does not handle this? .. or rather if async call,
        # birdy asks for status, but pywps process says no to it and fails the request
        status_supported=True)
def _handler(self, request, response):
    """Return a pre-rendered modes-of-variability demo plot."""
    response.update_status("starting ...", 0)
    # run diag
    response.update_status("running diag ...", 20)
    # result plot
    response.update_status("collect output plot ...", 90)
    plot_output = response.outputs['output']
    plot_output.output_format = Format('image/png')
    plot_output.file = util.diagdata_file(
        os.path.join('modes_of_variability',
                     'era_interim_1990-01-2010-01_clusters.png'))
    response.update_status("done.", 100)
    return response
def __init__(self):
    """Register the word-counter process."""
    text_input = ComplexInput('text', 'Text document',
                              abstract='URL pointing to text document',
                              supported_formats=[Format('text/plain')])
    result_output = ComplexOutput('output', 'Word counter result',
                                  as_reference=True,
                                  supported_formats=[Format('application/json')])
    super(WordCounter, self).__init__(
        self._handler,
        identifier='wordcounter',
        title='Word Counter',
        abstract="Counts words in a given text.",
        version='1.0',
        inputs=[text_input],
        outputs=[result_output],
        store_supported=True,
        status_supported=True)
def __init__(self):
    """Sample process demonstrating the various WPS input kinds."""
    inputs = [
        LiteralInput(
            'sentinel_products',
            # BUG FIX: user-facing typo 'Sentine' -> 'Sentinel'.
            'Sentinel Product IDs',
            data_type='string',
            abstract="""
            helper:sentinel_product_select
            """,
        ),
        ComplexInput(
            'geojson',
            'GeoJSON region',
            supported_formats=[Format('application/vnd.geo+json')],
            abstract="GeoJson",
            mode=MODE.SIMPLE,
            max_occurs=1),
        LiteralInput('ranged',
                     'Ranged Value',
                     abstract="""
                     Sample of allowed_value usage
                     """,
                     data_type='integer',
                     allowed_values=AllowedValue(
                         allowed_type=ALLOWEDVALUETYPE.RANGE,
                         minval=0,
                         maxval=100),
                     max_occurs=1)
    ]
    outputs = [
        LiteralOutput(
            'none',
            'Nothing',
            data_type='string',
            # BUG FIX: user-facing typo 'ouput' -> 'output'.
            abstract="""
            Empty output.
            """,
        )
    ]
    super(InputDemos, self).__init__(
        identifier='input_demos',
        abstract="""
        Input demonstrators.
        """,
        version='0.1',
        title="Input Demos",
        profile='',
        metadata=[Metadata('Sample'), Metadata('Input')],
        inputs=inputs,
        outputs=outputs,
    )
def _handler(self, request, response):
    """Return a pre-rendered RainFARM demo plot."""
    response.update_status("starting ...", 0)
    # run diag
    response.update_status("running diag ...", 20)
    # result plot
    response.update_status("collect output plot ...", 90)
    plot_output = response.outputs['output']
    plot_output.output_format = Format('image/png')
    plot_output.file = util.diagdata_file(
        os.path.join('rainfarm', 'RainFARM_example_64x64.png'))
    response.update_status("done.", 100)
    return response
def __init__(self):
    """Register the update-metadata process for NetCDF attribute fixes."""
    # Progress checkpoints reported via update_status.
    self.status_percentage_steps = {
        "start": 0,
        "process": 10,
        "build_output": 95,
        "complete": 100,
    }
    netcdf_input = ComplexInput(
        "netcdf",
        "Daily NetCDF Dataset",
        abstract="NetCDF file",
        min_occurs=1,
        max_occurs=1,
        supported_formats=[FORMATS.NETCDF, FORMATS.DODS],
    )
    updates_file_input = ComplexInput(
        "updates_file",
        "Updates File(yaml)",
        abstract="The filepath of an updates file that specifies what to do to the metadata it finds in the NetCDF file",
        min_occurs=0,
        max_occurs=1,
        supported_formats=[
            Format(
                mime_type="text/x-yaml",
                extension=".yaml",
            )
        ],
    )
    updates_string_input = LiteralInput(
        "updates_string",
        "Updates String(yaml format)",
        abstract="The string in yaml format that specifies what to do to the metadata it finds in the NetCDF file",
        min_occurs=0,
        max_occurs=1,
        data_type="string",
    )
    super(UpdateMetadata, self).__init__(
        self._handler,
        identifier="update_metadata",
        title="Update NetCDF Metadata",
        abstract="Update file containing missing, invalid, or incorrectly named global or variable metadata attributes",
        store_supported=True,
        status_supported=True,
        inputs=[netcdf_input, updates_file_input, updates_string_input,
                log_level],
        outputs=[nc_output],
    )
def test_complex_input_identifier(self):
    """ComplexInput XML carries its identifier and all keywords."""
    foo_input = ComplexInput('foo', 'Complex foo',
                             keywords=['kw1', 'kw2'],
                             supported_formats=[Format('bar/baz')])
    doc = foo_input.describe_xml()
    self.assertEqual(doc.tag, E.Input().tag)
    [identifier_el] = xpath_ns(doc, './ows:Identifier')
    self.assertEqual(identifier_el.text, 'foo')
    keyword_els = xpath_ns(doc, './ows:Keywords/ows:Keyword')
    self.assertEqual(len(keyword_els), 2)
def build(self):
    """Create a WPS parameter (complex or literal) from ``self.metadata``."""
    identifier = self.metadata.get('identifier')
    title = self.metadata.get('title')
    fmt_name = self.metadata.get('format')
    if fmt_name:
        # A declared format means a complex parameter with that mime type.
        mime_type = getattr(FORMATS, fmt_name).mime_type
        return self.complex(identifier, title, [Format(mime_type)])
    # Otherwise fall back to a literal parameter of the declared data type.
    return self.literal(identifier, title,
                        data_type=self.metadata.get('dataType'))
def __init__(self):
    """Sample."""
    # NOTE(review): dates are plain integers in YYYYMMDD form rather than a
    # WPS date type — confirm downstream parsing expects this.
    inputs = [
        ComplexInput(
            'search_polygon',
            'GeoJSON region',
            supported_formats=[Format('application/vnd.geo+json')],
            abstract="""
            GeoJson of region to search
            """,
            mode=MODE.SIMPLE,
            max_occurs=1),
        LiteralInput('start_date',
                     'Start date',
                     abstract="""
                     Datestamp in format YYYYMMDD
                     """,
                     data_type='integer',
                     max_occurs=1),
        LiteralInput('end_date',
                     'End date',
                     abstract="""
                     Datestamp in format YYYYMMDD
                     """,
                     data_type='integer',
                     max_occurs=1)
    ]
    # Output is a path into the shared Workflow Data volume, not the data
    # itself — other services dereference the directory.
    outputs = [
        LiteralOutput(
            'output_dir',
            'Workflow data volume path',
            data_type='string',
            abstract="""
            Path to a directory within the Workflow Data volume. The service
            will store all outputs in this dir, then provide a reference to
            the directory which other services can use.
            """,
        )
    ]
    super(SentinelDownload, self).__init__(
        identifier='acquisition:sentinel1',
        abstract="""
        Use sentinelsat python module to download Sentinel 1 data
        """,
        version='0.1',
        title="Download Sentinel 1 Data (referencing data volume)",
        metadata=[Metadata('Testing')],
        profile='',
        inputs=inputs,
        outputs=outputs,
    )
def get_outputs(self, result, response):
    """Attach the CVDP annual-trend plot for each variable to the response."""
    # result plot
    response.update_status("collecting output ...", 80)
    varlist = ['tas', 'psl', 'pr', 'sst']
    for var in varlist:
        key = "{}_trend_ann_plot".format(var.lower())
        # BUG FIX: the IANA-registered media type for PNG images is
        # 'image/png'; 'application/png' is not a registered type.
        response.outputs[key].output_format = Format('image/png')
        response.outputs[key].file = runner.get_output(
            result['work_dir'],  # Yes, it's in the work dir
            path_filter=os.path.join('diagnostic1', 'cvdp'),
            name_filter="{}.trends.ann".format(var.lower()),
            output_format="png")
def outputs_from_plot_names(plotlist):
    """Build one PNG ComplexOutput per plot name.

    :param plotlist: iterable of plot names (e.g. ['Timeseries'])
    :return: list of ComplexOutput objects keyed '<name>_plot'
    """
    # Idiom: list comprehension instead of a manual append loop.
    return [
        ComplexOutput(
            '{}_plot'.format(plot.lower()),
            '{} plot'.format(plot),
            abstract='Generated {} plot of ESMValTool processing.'.format(
                plot),
            as_reference=True,
            supported_formats=[Format('image/png')])
        for plot in plotlist
    ]
def __init__(self):
    """Register the centroids process (GML in, GeoJSON centroids out)."""
    layer_input = ComplexInput(
        'layer', 'Layer',
        supported_formats=[Format('application/gml+xml')])
    json_output = ComplexOutput(
        'out', 'Referenced Output',
        supported_formats=[Format('application/json')])
    super(Centroids, self).__init__(
        self._handler,
        identifier='centroids',
        title='Process Centroids',
        abstract='Returns a GeoJSON '
                 'with centroids of features from an uploaded GML.',
        inputs=[layer_input],
        outputs=[json_output],
        store_supported=True,
        status_supported=True)
def _handler(self, request, response):
    """Run the py_demo ESMValTool recipe for the requested CMIP5 dataset."""
    response.update_status("starting ...", 0)

    # build esgf search constraints
    constraints = dict(
        model=request.inputs['model'][0].data,
        experiment=request.inputs['experiment'][0].data,
        time_frequency='mon',
        cmor_table='Amon',
        ensemble=request.inputs['ensemble'][0].data,
    )

    # generate recipe
    response.update_status("generate recipe ...", 10)
    recipe_file, config_file = runner.generate_recipe(
        workdir=self.workdir,
        diag='py_demo',
        constraints=constraints,
        start_year=request.inputs['start_year'][0].data,
        end_year=request.inputs['end_year'][0].data,
        output_format='png',
    )

    # run diag
    response.update_status("running diag ...", 20)
    logfile, plot_dir, work_dir = runner.run(recipe_file, config_file)

    # recipe output
    response.outputs['recipe'].output_format = FORMATS.TEXT
    response.outputs['recipe'].file = recipe_file

    # log output
    response.outputs['log'].output_format = FORMATS.TEXT
    response.outputs['log'].file = logfile

    # result plot
    response.update_status("collect output plot ...", 90)
    # BUG FIX: the IANA-registered media type for PNG images is
    # 'image/png'; 'application/png' is not a registered type.
    response.outputs['output'].output_format = Format('image/png')
    response.outputs['output'].file = runner.get_output(
        plot_dir,
        path_filter=os.path.join('diagnostic1', 'script1'),
        name_filter="CMIP5*",
        output_format="png")

    response.outputs['data'].output_format = FORMATS.NETCDF
    response.outputs['data'].file = runner.get_output(
        work_dir,
        path_filter=os.path.join('diagnostic1', 'script1'),
        name_filter="CMIP5*",
        output_format="nc")

    response.update_status("done.", 100)
    return response