def __init__(self):
    """Declare inputs/outputs and register the ``climatefactsheet`` WPS process."""
    # --- inputs ---------------------------------------------------------
    nc_resource = ComplexInput(
        'resource', 'Resource',
        abstract="NetCDF Files or archive (tar/zip) containing netCDF files",
        min_occurs=1,
        max_occurs=1000,
        # maxmegabites=5000,
        supported_formats=[
            Format('application/x-netcdf'),
            Format('application/x-tar'),
            Format('application/zip'),
        ])
    region = LiteralInput(
        "region", "Region",
        # abstract= countries_longname(),  # need to handle special non-ascii char in countries.
        default='DEU',
        data_type='string',
        min_occurs=1,
        max_occurs=len(countries()),
        allowed_values=countries())  # REGION_EUROPE #COUNTRIES

    inputs = [nc_resource, region]

    # --- outputs --------------------------------------------------------
    outputs = [
        ComplexOutput(
            'output_nc', "Subsets",
            abstract="Tar archive containing the netCDF files",
            as_reference=True,
            supported_formats=[Format("application/x-tar")]),
        ComplexOutput(
            'output_factsheet', "Climate Fact Sheet",
            abstract="Short overview of the climatological situation of the selected countries",
            as_reference=True,
            supported_formats=[Format('application/pdf')]),
        ComplexOutput(
            'output_log', 'Logging information',
            abstract="Collected logs during process run.",
            as_reference=True,
            supported_formats=[Format("text/plain")]),
    ]

    super(FactsheetProcess, self).__init__(
        self._handler,
        identifier="climatefactsheet",
        title="Climate Fact Sheet Generator (init)",
        version="0.2",
        abstract="Returns a pdf with a short overview of the climatological situation for the selected countries",
        metadata=[
            # {"title": "LSCE", "href": "http://www.lsce.ipsl.fr/en/index.php"},
            {"title": "Doc", "href": "http://flyingpigeon.readthedocs.io/en/latest/"},
        ],
        inputs=inputs,
        outputs=outputs,
        status_supported=True,
        store_supported=True)
def __init__(self):
    """Declare inputs/outputs and register the ``segetalflora`` WPS process.

    Computes species biodiversity of segetal flora from ``tas`` input files.
    """
    WPSProcess.__init__(
        self,
        identifier="segetalflora",
        title="Segetal Flora",
        version="0.3",
        metadata=[{"title": "Institut Pierre Simon Laplace", "href": "https://www.ipsl.fr/en/"}],
        # fixed typo in user-facing text: "Imput files" -> "Input files"
        abstract="Species biodiversity of segetal flora. Input files: variable:tas , domain: EUR-11 or EUR-44",
        statusSupported=True,
        storeSupported=True,
    )

    # --- inputs ---------------------------------------------------------
    self.netcdf_file = self.addComplexInput(
        identifier="netcdf_file",
        title="NetCDF Files",
        abstract="NetCDF File",
        minOccurs=1,
        maxOccurs=1000,
        maxmegabites=500000,  # 'maxmegabites' is the legacy PyWPS keyword spelling
        formats=[{"mimeType": "application/x-netcdf"}],
    )
    self.climate_type = self.addLiteralInput(
        identifier="climate_type",
        title="Climate type",
        abstract="Select climate type",
        default='3',
        type=type(''),
        minOccurs=1,
        maxOccurs=8,
        allowedValues=["1", "2", "3", "4", "5", "6", "7", "all"]  # sem
    )
    self.culture_type = self.addLiteralInput(
        identifier="culture_type",
        title="Culture type",
        abstract="Select culture type",
        default='fallow',
        type=type(''),
        minOccurs=1,
        maxOccurs=8,
        allowedValues=["fallow", "intensiv", "extensiv"]  # sem
    )
    self.region = self.addLiteralInput(
        identifier="region",
        title="Region",
        abstract="European Regions ...",
        default='FRA',
        type=type(''),
        minOccurs=0,
        maxOccurs=25,
        allowedValues=countries()
    )

    # --- outputs (tar archives, returned by reference) -------------------
    self.out_tas = self.addComplexOutput(
        identifier="out_tas",
        title="tas_EUR",
        abstract="Tar archive containing the netCDF EUR tas mean files",
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True,
    )
    self.out_segetalflora = self.addComplexOutput(
        identifier="out_segetalflora",
        title="polygons",
        abstract="Tar archive containing the netCDF EU-countries polygons segetalflora ",
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True,
    )
    self.out_fieldmeans = self.addComplexOutput(
        identifier="out_fieldmeans",
        title="fieldmeans",
        abstract="Tar archive containing the netCDF EU-countries fieldmeans segetalflora ",
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True,
    )
    self.out_plots = self.addComplexOutput(
        identifier="out_plots",
        title="plots",
        abstract="Tar archive containing the bokeh plots html files for segetalflora ",
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True,
    )
def __init__(self):
    """Declare inputs/outputs and register the ``indices_percentile`` WPS process (v0.9)."""
    WPSProcess.__init__(
        self,
        identifier="indices_percentile",
        title="Climate indices -- Percentile",
        version="0.9",
        abstract="Climate indices based on one single input variable and the percentile of a reference period.",
        metadata=[
            {'title': 'Documentation',
             'href': 'http://flyingpigeon.readthedocs.io/en/latest/descriptions/index.html#climate-indices'},
            {"title": "ICCLIM", "href": "http://icclim.readthedocs.io/en/latest/"},
            {"title": "Percentile-based indices",
             "href": "http://flyingpigeon.readthedocs.io/en/latest/descriptions/indices.html#percentile-based-indices"},
        ],
        statusSupported=True,
        storeSupported=True)

    # --- inputs ---------------------------------------------------------
    self.resource = self.addComplexInput(
        identifier="resource",
        title="Resource",  # fixed typo: was "Resouce"
        abstract="NetCDF File",
        minOccurs=1,
        maxOccurs=100,
        maxmegabites=5000,
        formats=[{"mimeType": "application/x-netcdf"}],
    )
    self.indices = self.addLiteralInput(
        identifier="indices",
        title="Index",
        abstract='Select an index',
        default='TG',
        type=type(''),
        minOccurs=1,
        maxOccurs=1,  # len(indices()),
        allowedValues=['TG', 'TN', 'TX'],  # indices()
    )
    self.percentile = self.addLiteralInput(
        identifier="percentile",
        title="Percentile",
        abstract='Select a percentile',  # fixed grammar: was "Select an percentile"
        default=90,
        type=type('0'),
        minOccurs=1,
        maxOccurs=1,
        allowedValues=range(1, 100),
    )
    self.refperiod = self.addLiteralInput(
        identifier="refperiod",
        title="Reference Period",
        abstract="Time refperiod to retrieve the percentile level",
        default="19700101-20101231",
        type=type(''),
        minOccurs=0,
        maxOccurs=1,
    )
    self.groupings = self.addLiteralInput(
        identifier="groupings",
        title="Grouping",
        abstract="Select an time grouping (time aggregation)",
        default='yr',
        type=type(''),
        minOccurs=1,
        maxOccurs=len(GROUPING),
        allowedValues=GROUPING)
    self.polygons = self.addLiteralInput(
        identifier="polygons",
        title="Country subset",
        abstract=countries_longname(),
        default='DEU',
        type=type(''),
        minOccurs=0,
        maxOccurs=len(countries()),
        allowedValues=countries())
    self.mosaic = self.addLiteralInput(
        identifier="mosaic",
        title="Mosaic",
        # fixed grammar: "polygons be clipped" -> "polygons will be clipped"
        abstract="If Mosaic is checked, selected polygons will be clipped as a mosaic for each input file",
        default=False,
        type=type(False),
        minOccurs=0,
        maxOccurs=1,
    )

    # --- output ---------------------------------------------------------
    self.output = self.addComplexOutput(
        identifier="output",
        title="Index",
        abstract="Calculated index as NetCDF file",
        metadata=[],
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True)
def __init__(self):
    """Register the ``subset_countries`` WPS process with its inputs and outputs."""
    WPSProcess.__init__(
        self,
        identifier="subset_countries",
        title="Subset countries",
        version="0.9",
        abstract="Returns only the selected polygon for each input dataset",
        metadata=[
            {"title": "LSCE", "href": "http://www.lsce.ipsl.fr/en/index.php"},
            {"title": "Documentation", "href": "http://flyingpigeon.readthedocs.io/en/latest/"},
        ],
        statusSupported=True,
        storeSupported=True)

    # Input: netCDF files, optionally packed in tar/zip archives.
    self.resource = self.addComplexInput(
        identifier="resource",
        title="Resource",
        abstract="NetCDF Files or archive (tar/zip) containing netCDF files",
        minOccurs=1,
        maxOccurs=1000,
        maxmegabites=5000,
        formats=[{"mimeType": "application/x-netcdf"},
                 {"mimeType": "application/x-tar"},
                 {"mimeType": "application/zip"}],
    )
    # Input: country code(s) selecting the polygons to subset by.
    self.region = self.addLiteralInput(
        identifier="region",
        title="Region",
        # abstract= countries_longname(),  # need to handle special non-ascii char in countries.
        default='DEU',
        type=type(''),
        minOccurs=1,
        maxOccurs=len(countries()),
        allowedValues=countries()
    )
    # Input: merge the selected polygons into one mosaic per input file.
    self.mosaic = self.addLiteralInput(
        identifier="mosaic",
        title="Mosaic",
        abstract="If Mosaic is checked, selected polygons will be merged to one Mosaic for each input file",
        default=False,
        type=type(False),
        minOccurs=0,
        maxOccurs=1,
    )

    # Outputs: a tar archive of all subsets, one example netCDF, and the log.
    self.output = self.addComplexOutput(
        identifier="output",
        title="Subsets",
        abstract="Tar archive containing the netCDF files",
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True,
    )
    self.output_netcdf = self.addComplexOutput(
        identifier="ncout",
        title="Subsets for one dataset",
        abstract="NetCDF file with subsets of one dataset.",
        formats=[{"mimeType": "application/x-netcdf"}],
        asReference=True,
    )
    self.output_log = self.addComplexOutput(
        identifier="output_log",
        title="Logging information",
        abstract="Collected logs during process run.",
        formats=[{"mimeType": "text/plain"}],
        asReference=True,
    )
def __init__(self):
    """Register the ``subset_countries`` WPS process (v0.3, netCDF polygon subsetting)."""
    WPSProcess.__init__(
        self,
        identifier="subset_countries",
        title="Subset netCDF files",
        version="0.3",
        abstract="This process returns only the given polygon from input netCDF files.",
        statusSupported=True,
        storeSupported=True)

    self.resource = self.addComplexInput(
        identifier="resource",
        title="Resource",
        abstract="NetCDF File",
        minOccurs=1,
        maxOccurs=1000,
        maxmegabites=5000,
        formats=[{"mimeType": "application/x-netcdf"}],
    )
    self.region = self.addLiteralInput(
        identifier="region",
        title="Region",
        # abstract= countries_longname(),  # need to handle special non-ascii char in countries.
        default='DEU',
        type=type(''),
        minOccurs=1,
        maxOccurs=len(countries()),
        allowedValues=countries()
    )
    self.dimension_map = self.addLiteralInput(
        identifier="dimension_map",
        title="Dimension Map",
        abstract='if not ordered in lon/lat a dimension map has to be provided',
        type=type(''),
        minOccurs=0,
        maxOccurs=1
    )
    self.variable = self.addLiteralInput(
        identifier="variable",
        title="Variable",
        # fixed stray ", )" at the end of the user-facing abstract text
        abstract="Variable to be expected in the input files (Variable will be detected if not set)",
        default=None,
        type=type(''),
        minOccurs=0,
        maxOccurs=1,
    )
    self.mosaik = self.addLiteralInput(
        identifier="mosaik",  # identifier spelling kept for backward compatibility
        title="Mosaik",
        abstract="If Mosaik is checked, selected polygons will be merged to one Mosaik for each input file",
        default=False,
        type=type(False),
        minOccurs=0,
        maxOccurs=1,
    )
    self.output = self.addComplexOutput(
        identifier="output",
        title="Subsets",
        abstract="Tar archive containing the netCDF files",
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True,
    )
def __init__(self):
    """Register the ``subset_countries`` WPS process (v0.9) with its I/O."""
    WPSProcess.__init__(
        self,
        identifier="subset_countries",
        title="Subset countries",
        version="0.9",
        abstract="Returns only the selected polygon for each input dataset",
        metadata=[
            {"title": "LSCE", "href": "http://www.lsce.ipsl.fr/en/index.php"},
            {"title": "Documentation", "href": "http://flyingpigeon.readthedocs.io/en/latest/"},
        ],
        statusSupported=True,
        storeSupported=True)

    # Input: netCDF files to subset.
    self.resource = self.addComplexInput(
        identifier="resource",
        title="Resource",
        abstract="NetCDF File",
        minOccurs=1,
        maxOccurs=1000,
        maxmegabites=5000,
        formats=[{"mimeType": "application/x-netcdf"}],
    )
    # Input: country code(s) for the polygon selection.
    self.region = self.addLiteralInput(
        identifier="region",
        title="Region",
        # abstract= countries_longname(),  # need to handle special non-ascii char in countries.
        default='DEU',
        type=type(''),
        minOccurs=1,
        maxOccurs=len(countries()),
        allowedValues=countries()
    )
    # Input: merge selected polygons into one mosaic per input file.
    self.mosaic = self.addLiteralInput(
        identifier="mosaic",
        title="Mosaic",
        abstract="If Mosaic is checked, selected polygons will be merged to one Mosaic for each input file",
        default=False,
        type=type(False),
        minOccurs=0,
        maxOccurs=1,
    )
    # Input: optional variable name (auto-detected when omitted).
    self.variable = self.addLiteralInput(
        identifier="variable",
        title="Variable",
        abstract="Variable to be expected in the input files (Variable will be detected if not set)",
        default=None,
        type=type(''),
        minOccurs=0,
        maxOccurs=1,
    )

    # Outputs: tar archive of all subsets plus one example netCDF file.
    self.output = self.addComplexOutput(
        identifier="output",
        title="Subsets",
        abstract="Tar archive containing the netCDF files",
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True,
    )
    self.output_netcdf = self.addComplexOutput(
        identifier="ncout",
        title="Subsets for one dataset",
        abstract="NetCDF file with subsets of one dataset.",
        formats=[{"mimeType": "application/x-netcdf"}],
        asReference=True,
    )
def __init__(self):
    """Declare inputs/outputs and register the ``subset_countries`` clipping process."""
    inputs = [
        LiteralInput(
            'region', 'Region',
            data_type='string',
            # URL written as one clean string: the previous in-string line
            # continuation embedded stray whitespace inside the abstract/URL.
            abstract="Country code, see ISO-3166-3: "
                     "https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3#Officially_assigned_code_elements",
            min_occurs=1,
            max_occurs=len(countries()),
            default='DEU',
            allowed_values=countries()),
        LiteralInput(
            'mosaic', 'Union of multiple regions',
            data_type='boolean',
            abstract="If True, selected regions will be merged"
                     " into a single geometry.",
            min_occurs=0,
            max_occurs=1,
            default=False),
        ComplexInput(
            'resource', 'Resource',
            abstract='NetCDF Files or archive (tar/zip) containing NetCDF files.',
            metadata=[Metadata('Info')],
            min_occurs=1,
            max_occurs=1000,
            supported_formats=[
                Format('application/x-netcdf'),
                Format('application/x-tar'),
                Format('application/zip'),
            ]),
    ]

    outputs = [
        ComplexOutput(
            'output', 'Tar archive',
            abstract="Tar archive of the subsetted netCDF files.",
            as_reference=True,
            supported_formats=[Format('application/x-tar')]),
        ComplexOutput(
            'ncout', 'Example netCDF file',
            abstract="NetCDF file with subset for one dataset.",
            as_reference=True,
            supported_formats=[Format('application/x-netcdf')]),
        ComplexOutput(
            'output_log', 'Logging information',
            abstract="Collected logs during process run.",
            as_reference=True,
            supported_formats=[Format('text/plain')]),
    ]

    super(ClippingProcess, self).__init__(
        self._handler,
        identifier="subset_countries",
        title="Subset (World Countries)",
        version="0.10",
        abstract="Return the data whose grid cells intersect the selected countries for each input dataset.",
        metadata=[
            Metadata('LSCE', 'http://www.lsce.ipsl.fr/en/index.php'),
            Metadata('Doc', 'http://flyingpigeon.readthedocs.io/en/latest/'),
        ],
        inputs=inputs,
        outputs=outputs,
        status_supported=True,
        store_supported=True,
    )
def __init__(self):
    """Register the ``indices_simple`` WPS process (single-variable climate indices)."""
    WPSProcess.__init__(
        self,
        identifier="indices_simple",
        title="Climate indices -- Simple",
        version="0.9",
        abstract="Climate indices based on one single input variable.",
        metadata=[
            {'title': 'Documentation',
             'href': 'http://flyingpigeon.readthedocs.io/en/latest/descriptions/index.html#climate-indices'},
            {"title": "ICCLIM", "href": "http://icclim.readthedocs.io/en/latest/"},
            {"title": "Simple Indices",
             "href": "http://flyingpigeon.readthedocs.io/en/latest/descriptions/indices.html"}
        ],
        statusSupported=True,
        storeSupported=True)

    # --- inputs ---------------------------------------------------------
    self.resource = self.addComplexInput(
        identifier="resource",
        title="Resource",  # fixed typo: was "Resouce"
        abstract="NetCDF File",
        minOccurs=1,
        maxOccurs=100,
        maxmegabites=5000,
        formats=[{"mimeType": "application/x-netcdf"}],
    )
    self.groupings = self.addLiteralInput(
        identifier="groupings",
        title="Grouping",
        abstract="Select an time grouping (time aggregation)",
        default='yr',
        type=type(''),
        minOccurs=1,
        maxOccurs=len(GROUPING),
        allowedValues=GROUPING
    )
    self.indices = self.addLiteralInput(
        identifier="indices",
        title="Index",
        abstract=indices_description(),
        default='SU',
        type=type(''),
        minOccurs=1,
        maxOccurs=len(indices()),
        allowedValues=indices()
    )
    self.polygons = self.addLiteralInput(
        identifier="polygons",
        title="Country subset",
        abstract=str(countries_longname()),
        type=type(''),
        minOccurs=0,
        maxOccurs=len(countries()),
        allowedValues=countries()
    )
    self.mosaic = self.addLiteralInput(
        identifier="mosaic",
        title="Mosaic",
        # fixed grammar: "polygons be clipped" -> "polygons will be clipped"
        abstract="If Mosaic is checked, selected polygons will be clipped as a mosaic for each input file",
        default=False,
        type=type(False),
        minOccurs=0,
        maxOccurs=1,
    )

    # --- outputs --------------------------------------------------------
    self.output = self.addComplexOutput(
        identifier="output",
        title="Index",
        abstract="Calculated index as NetCDF file",
        metadata=[],
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True
    )
    self.output_netcdf = self.addComplexOutput(
        identifier="ncout",
        title="one dataset as example",
        abstract="NetCDF file to be displayed on WMS",  # fixed typo: was "dispayed"
        formats=[{"mimeType": "application/x-netcdf"}],
        asReference=True,
    )
def __init__(self):
    """Declare inputs/outputs and register the ``indices_percentiledays`` process."""
    inputs = [
        ComplexInput(
            'resource', 'Resource',
            abstract="NetCDF Files or archive (tar/zip) containing netCDF files.",
            min_occurs=1,
            max_occurs=1000,
            # maxmegabites=5000,
            supported_formats=[
                Format('application/x-netcdf'),
                Format('application/x-tar'),
                Format('application/zip'),
            ]),
        LiteralInput(
            "percentile", "Percentile",
            abstract='Percentile value [1-100].',
            default='90',
            data_type='integer',
            min_occurs=1,
            max_occurs=1,
            # range(1, 100) excluded 100 and contradicted the abstract
            # "[1-100]"; extended to include 100 (backward compatible).
            allowed_values=range(1, 101),
        ),
        LiteralInput(
            'region', 'Region',
            data_type='string',
            # URL written as one clean string: the previous in-string line
            # continuation embedded stray whitespace inside the abstract/URL.
            abstract="Country code, see ISO-3166-3: "
                     "https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3#Officially_assigned_code_elements",
            min_occurs=0,
            max_occurs=len(countries()),
            allowed_values=countries()),
        LiteralInput(
            "mosaic", "Mosaic",
            # fixed grammar: "polygons be clipped" -> "polygons will be clipped"
            abstract="If Mosaic is checked, selected polygons will be clipped as a mosaic for each input file.",
            default='0',
            data_type='boolean',
            min_occurs=0,
            max_occurs=1,
        ),
    ]

    outputs = [
        ComplexOutput(
            "output_archive", "Tar archive",
            abstract="Tar archive of the netCDF files storing the percentile values.",
            supported_formats=[Format("application/x-tar")],
            as_reference=True,
        ),
        ComplexOutput(
            'ncout', 'Example netCDF file',
            abstract="NetCDF file storing the percentiles computed over one dataset.",
            as_reference=True,
            supported_formats=[Format('application/x-netcdf')]),
        ComplexOutput(
            'output_log', 'Logging information',
            abstract="Collected logs during process run.",
            as_reference=True,
            supported_formats=[Format("text/plain")]),
    ]

    super(IndicespercentiledaysProcess, self).__init__(
        self._handler,
        identifier="indices_percentiledays",
        title="Climate indices (Daily percentiles)",
        version="0.10",
        abstract="Climatological percentile for each day of the year "
                 "computed over the entire dataset.",
        metadata=[
            # URLs written as single clean strings (previous in-string line
            # continuations embedded stray whitespace in the hrefs).
            {'title': 'Doc',
             'href': 'http://flyingpigeon.readthedocs.io/en/latest/descriptions/index.html#climate-indices'},
            {"title": "ICCLIM", "href": "http://icclim.readthedocs.io/en/latest/"},
            {"title": "Percentile-based indices",
             "href": "http://flyingpigeon.readthedocs.io/en/latest/descriptions/indices.html#percentile-based-indices"},
        ],
        inputs=inputs,
        outputs=outputs,
        status_supported=True,
        store_supported=True)
def __init__(self):
    """Declare inputs/outputs and register the ``indices_single`` WPS process."""
    inputs = [
        ComplexInput(
            'resource', 'Resource',
            abstract="NetCDF Files or archive (tar/zip) containing netCDF files.",
            min_occurs=1,
            max_occurs=1000,
            # maxmegabites=5000,
            supported_formats=[
                Format('application/x-netcdf'),
                Format('application/x-tar'),
                Format('application/zip'),
            ]),
        LiteralInput(
            "indices", "Index",
            abstract='Climate index code.',
            default='TG',
            data_type='string',
            min_occurs=1,
            max_occurs=1,
            allowed_values=indices()),
        LiteralInput(
            "grouping", "Grouping",
            abstract="Temporal group over which the index is computed.",
            default='yr',
            data_type='string',
            min_occurs=1,
            max_occurs=1,  # len(GROUPING),
            allowed_values=GROUPING),
        LiteralInput(
            'region', 'Region',
            data_type='string',
            # URL written as one clean string: the previous in-string line
            # continuation embedded stray whitespace inside the abstract/URL.
            abstract="Country code, see ISO-3166-3: "
                     "https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3#Officially_assigned_code_elements",
            min_occurs=0,
            max_occurs=len(countries()),
            # default='DEU',
            allowed_values=countries()),
        LiteralInput(
            "mosaic", "Mosaic",
            # fixed grammar: "polygons be clipped" -> "polygons will be clipped"
            abstract="If Mosaic is checked, selected polygons will be clipped as a mosaic for each input file",
            default='0',
            data_type='boolean',
            min_occurs=0,
            max_occurs=1,
        ),
    ]

    outputs = [
        ComplexOutput(
            "output_archive", "Tar archive",
            abstract="Tar archive of the netCDF files storing the index values.",
            supported_formats=[Format("application/x-tar")],
            as_reference=True,
        ),
        ComplexOutput(
            'ncout', 'Example netCDF file',
            abstract="NetCDF file storing the index computed over one dataset.",
            as_reference=True,
            supported_formats=[Format('application/x-netcdf')]),
        ComplexOutput(
            'output_log', 'Logging information',
            abstract="Collected logs during process run.",
            as_reference=True,
            supported_formats=[Format("text/plain")]),
    ]

    super(IndicessingleProcess, self).__init__(
        self._handler,
        identifier="indices_single",
        title="Climate indices (Single variable)",
        version="0.10",
        abstract="Climate index calculated from one daily input variable.",
        metadata=[
            # URLs written as single clean strings (previous in-string line
            # continuations embedded stray whitespace in the hrefs).
            {'title': 'Doc',
             'href': 'http://flyingpigeon.readthedocs.io/en/latest/descriptions/index.html#climate-indices'},
            {"title": "ICCLIM", "href": "http://icclim.readthedocs.io/en/latest/"},
            {"title": "Percentile-based indices",
             "href": "http://flyingpigeon.readthedocs.io/en/latest/descriptions/indices.html#percentile-based-indices"},
        ],
        inputs=inputs,
        outputs=outputs,
        status_supported=True,
        store_supported=True
    )
def __init__(self):
    """Register the ``indices_percentile`` WPS process (v0.1, percentile-based indices)."""
    WPSProcess.__init__(
        self,
        identifier="indices_percentile",
        title="Climate indices -- Percentile",
        version="0.1",
        # fixed typos in user-facing text: "referece refperiod" -> "reference period"
        abstract="Climate indices based on one single input variable and the percentile of a reference period.",
        metadata=[
            {'title': 'Documentation',
             'href': 'http://flyingpigeon.readthedocs.io/en/latest/descriptions/index.html#climate-indices'},
            {"title": "ICCLIM", "href": "http://icclim.readthedocs.io/en/latest/"},
            {"title": "Percentile-based indices",
             "href": "http://flyingpigeon.readthedocs.io/en/latest/descriptions/indices.html#percentile-based-indices"},
        ],
        statusSupported=True,
        storeSupported=True)

    # --- inputs ---------------------------------------------------------
    self.resource = self.addComplexInput(
        identifier="resource",
        title="Resource",  # fixed typo: was "Resouce"
        abstract="NetCDF File",
        minOccurs=1,
        maxOccurs=100,
        maxmegabites=5000,
        formats=[{"mimeType": "application/x-netcdf"}],
    )
    self.indices = self.addLiteralInput(
        identifier="indices",
        title="Indice",
        abstract='Select an indice',
        default='TG',
        type=type(''),
        minOccurs=1,
        maxOccurs=1,  # len(indices()),
        allowedValues=['TG', 'TN', 'TX'],  # indices()
    )
    self.percentile = self.addLiteralInput(
        identifier="percentile",
        title="Percentile",
        abstract='Select a percentile',  # fixed grammar: was "Select an percentile"
        default=90,
        type=type('0'),
        minOccurs=1,
        maxOccurs=1,
        allowedValues=range(1, 100),
    )
    self.refperiod = self.addLiteralInput(
        identifier="refperiod",
        title="Reference Period",
        abstract="Time refperiod to retrieve the percentile level",
        default="19700101-20101231",
        type=type(''),
        minOccurs=0,
        maxOccurs=1,
    )
    self.groupings = self.addLiteralInput(
        identifier="groupings",
        title="Grouping",
        abstract="Select an time grouping (time aggregation)",
        default='yr',
        type=type(''),
        minOccurs=1,
        maxOccurs=len(GROUPING),
        allowedValues=GROUPING
    )
    self.polygons = self.addLiteralInput(
        identifier="polygons",
        title="Country subset",
        abstract=countries_longname(),
        default='DEU',
        type=type(''),
        minOccurs=0,
        maxOccurs=len(countries()),
        allowedValues=countries()
    )
    self.mosaik = self.addLiteralInput(
        identifier="mosaik",  # identifier spelling kept for backward compatibility
        title="Mosaik",
        # fixed grammar: "polygons be clipped" -> "polygons will be clipped"
        abstract="If Mosaik is checked, selected polygons will be clipped as a mosaik for each input file",
        default=False,
        type=type(False),
        minOccurs=0,
        maxOccurs=1,
    )

    # --- output ---------------------------------------------------------
    self.output = self.addComplexOutput(
        identifier="output",
        title="Indice",
        abstract="Calculated indice as NetCDF file",
        metadata=[],
        formats=[{"mimeType": "application/x-tar"}],
        asReference=True
    )
def __init__(self):
    """Register the ``indices_single`` WPS process (v0.3, climate index from one variable)."""
    WPSProcess.__init__(
        self,
        identifier="indices_single",
        title="Calculation of climate indice (single variable)",
        version="0.3",
        abstract="This process calculates climate indices based on one single variable.",
        statusSupported=True,
        storeSupported=True)

    self.resource = self.addComplexInput(
        identifier="resource",
        title="Resource",  # fixed typo: was "Resouce"
        abstract="NetCDF File",
        minOccurs=1,
        maxOccurs=100,
        maxmegabites=5000,
        formats=[{"mimeType": "application/x-netcdf"}],
    )
    self.groupings = self.addLiteralInput(
        identifier="groupings",
        title="Grouping",
        abstract="Select an time grouping (time aggregation)",
        default='yr',
        type=type(''),
        minOccurs=1,
        maxOccurs=len(GROUPING),
        allowedValues=GROUPING
    )
    self.indices = self.addLiteralInput(
        identifier="indices",
        title="Indice",
        abstract=indices_description(),
        default='SU',
        type=type(''),
        minOccurs=1,
        maxOccurs=len(indices()),
        allowedValues=indices()
    )
    self.polygons = self.addLiteralInput(
        identifier="polygons",
        title="Country subset",
        abstract=countries_longname(),
        # default='FRA',
        type=type(''),
        minOccurs=0,
        maxOccurs=len(countries()),
        allowedValues=countries()
    )

    # --- output ---------------------------------------------------------
    self.output = self.addComplexOutput(
        identifier="output",
        title="Indice",
        abstract="Calculated indice as NetCDF file",
        metadata=[],
        formats=[{"mimeType": "application/x-netcdf"}],
        asReference=True
    )
def __init__(self):
    """Declare inputs/outputs and register the ``subset_countries`` process (v0.11)."""
    inputs = [
        LiteralInput(
            'region', 'Region',
            data_type='string',
            # URL written as one clean string: the previous in-string line
            # continuation embedded stray whitespace inside the abstract/URL.
            abstract="Country code, see ISO-3166-3: "
                     "https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3#Officially_assigned_code_elements",
            min_occurs=1,
            max_occurs=len(countries()),
            default='DEU',
            allowed_values=countries()),
        LiteralInput(
            'mosaic', 'Union of multiple regions',
            data_type='boolean',
            abstract="If True, selected regions will be merged"
                     " into a single geometry.",
            min_occurs=0,
            max_occurs=1,
            default=False),
        ComplexInput(
            'resource', 'Resource',
            abstract='NetCDF Files or archive (tar/zip) containing NetCDF files.',
            min_occurs=1,
            max_occurs=1000,
            supported_formats=[
                Format('application/x-netcdf'),
                Format('application/x-tar'),
                Format('application/zip'),
            ]),
    ]

    outputs = [
        ComplexOutput(
            'output', 'Tar archive',
            abstract="Tar archive of the subsetted netCDF files.",
            as_reference=True,
            supported_formats=[Format('application/x-tar')]),
        ComplexOutput(
            'ncout', 'Example netCDF file',
            abstract="NetCDF file with subset for one dataset.",
            as_reference=True,
            supported_formats=[Format('application/x-netcdf')]),
        # ComplexOutput('output_log', 'Logging information',
        #               abstract="Collected logs during process run.",
        #               as_reference=True,
        #               supported_formats=[Format('text/plain')]
        #               )
    ]

    super(SubsetcountryProcess, self).__init__(
        self._handler,
        identifier="subset_countries",
        title="Subset Countries",
        version="0.11",
        abstract="Return the data whose grid cells intersect the selected countries for each input dataset.",
        metadata=[
            Metadata('Doc', 'https://flyingpigeon.readthedocs.io/en/latest/processes_des.html#subset-processes'),
        ],
        inputs=inputs,
        outputs=outputs,
        status_supported=True,
        store_supported=True,
    )