Example #1
    def __init__(self):

        inputs = [
            ComplexInput(
                'resource',
                'Resource',
                abstract=
                'NetCDF files or an archive (tar/zip) containing NetCDF files.',
                metadata=[Metadata('Info')],
                min_occurs=1,
                max_occurs=1000,
                supported_formats=[
                    Format('application/x-netcdf'),
                    Format('application/x-tar'),
                    Format('application/zip'),
                ]),
            LiteralInput(
                "method",
                "Method of robustness calculation",
                abstract=
                "Detailed information about the methods can be found in the documentation",
                data_type='string',
                default='signal_noise_ratio',
                min_occurs=0,
                max_occurs=1,
                allowed_values=['signal_noise_ratio', 'Method_B', 'Method_C']),
            LiteralInput(
                "start",
                "Start Year",
                abstract=
                "Beginn of the analysed period (e.g 19710101; if not set, the first consistend \
                                  year of the ensemble will be taken)",
                data_type='integer',
                min_occurs=0,
                max_occurs=1,
                # default='1971'
                # allowedValues=range(1900,2200)
            ),
            LiteralInput(
                'end',
                "End Year",
                abstract="End of the analysed period (e.g. 20501231 if not set, \
                                   the last consistend year of the ensemble will be taken)",
                data_type='integer',
                min_occurs=0,
                max_occurs=1,
                # default='2000',
            ),
            LiteralInput(
                "timeslice",
                "Time slice",
                abstract=
                "Time slice (in days) for robustness reference e.g. 3650",
                data_type='integer',
                min_occurs=0,
                max_occurs=1,
                # default='3560'
                # allowedValues=range(1,50)
            ),

            # self.variableIn = self.addLiteralInput(
            #   identifier="variable",
            #   title="Variable",
            #   abstract="Variable to be expected in the input files (Variable will be detected if not set, )",
            #   # default=None,
            #   type=type(''),
            #   minOccurs=0,
            #   maxOccurs=1,
            #   )
        ]

        outputs = [
            ComplexOutput(
                'output_high',
                'Mask for areas with high agreement',
                abstract="netCDF file containing calculated robustness mask",
                supported_formats=[Format('application/x-netcdf')],
                as_reference=True,
            ),
            ComplexOutput(
                'output_low',
                'Mask for areas with low agreement',
                abstract="netCDF file containing calculated robustness mask",
                supported_formats=[Format('application/x-netcdf')],
                as_reference=True,
            ),
            ComplexOutput(
                'output_signal',
                'Signal',
                abstract=
                "netCDF file containing calculated change of mean over the timeperiod and ensemble",
                supported_formats=[Format('application/x-netcdf')],
                as_reference=True,
            ),
            ComplexOutput(
                "output_graphic",
                "Graphic",
                abstract=
                "Graphic showing the signal difference with high and low ensemble agreement",
                supported_formats=[Format("image/png")],
                as_reference=True,
            ),
            ComplexOutput(
                "output_text",
                "Sourcefiles",
                abstract="text file with a list of the used input data sets",
                supported_formats=[Format("text/plain")],
                as_reference=True,
            ),
            ComplexOutput(
                'output_log',
                'Logging information',
                abstract="Collected logs during process run.",
                supported_formats=[Format('text/plain')],
                as_reference=True,
            )
        ]

        super(RobustnessProcess, self).__init__(
            self._handler,
            identifier="robustness",
            title="Ensemble robustness",
            version="0.5",
            metadata=[Metadata("LSCE", "http://www.lsce.ipsl.fr/")],
            abstract=
            "Calculates the robustness as the ratio of noise to signal in an ensemle of timeseries",
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
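
The example stops at the constructor; the _handler method it registers is not shown. A minimal sketch of such a handler, assuming standard PyWPS request/response objects (the log content and the calculation placeholder are illustrative only, not the process's actual logic):

    def _handler(self, request, response):
        # Literal inputs arrive as lists of occurrences; .data holds the parsed value.
        method = request.inputs['method'][0].data
        # Optional inputs may be missing from request.inputs entirely.
        start = request.inputs['start'][0].data if 'start' in request.inputs else None
        # Complex inputs expose the staged file path via .file.
        resources = [inp.file for inp in request.inputs['resource']]

        response.update_status('starting robustness calculation', 10)
        # ... the actual robustness calculation and the remaining outputs would go here ...

        # Outputs are filled by assigning a file path via .file or inline content via .data.
        response.outputs['output_log'].data = 'method={}, start={}, files={}\n'.format(
            method, start, len(resources))

        response.update_status('done', 100)
        return response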
Example #2
    par_x01=20.0,
    par_x02=1.0,
    par_x03=20.0,
    par_x04=5.0,
    par_x05=0.5,
    par_x06=1.0,
    par_x07=1.0,
    par_x08=1.0,
    par_x09=15.0,
    par_x10=15.0,
)

upperBounds = LiteralInput(
    "upperBounds",
    "Comma separated list of model parameters Upper Bounds",
    abstract="UParameters: " + ", ".join(Uparams_defaults._fields),
    data_type="string",
    default=", ".join(str(p) for p in list(Uparams_defaults)),
    min_occurs=0,
)

lowerBounds = LiteralInput(
    "lowerBounds",
    "Comma separated list of model parameters Lower Bounds",
    abstract="LParameters: " + ", ".join(Lparams_defaults._fields),
    data_type="string",
    default=", ".join(str(p) for p in list(Lparams_defaults)),
    min_occurs=0,
)
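
Inside a handler, the comma-separated default built above has to be turned back into numbers before it reaches the model; a small sketch of that round trip (parse_bounds is an illustrative helper, not part of the process):

def parse_bounds(csv_string, fields):
    # '20.0, 1.0, ...' -> {'par_x01': 20.0, 'par_x02': 1.0, ...}
    values = [float(token) for token in csv_string.split(',')]
    if len(values) != len(fields):
        raise ValueError('expected {} values, got {}'.format(len(fields), len(values)))
    return dict(zip(fields, values))

# Round trip with the defaults defined above:
# parse_bounds(', '.join(str(p) for p in Uparams_defaults), Uparams_defaults._fields)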


class OstrichMOHYSEProcess(OstrichProcess):
    def __init__(self):
        inputs = [
            ComplexInput(
                'resource',
                'Resource',
                abstract=
                'NetCDF files or an archive (tar/zip) containing NetCDF files.',
                metadata=[Metadata('Info')],
                min_occurs=1,
                max_occurs=1000,
                supported_formats=[
                    Format('application/x-netcdf'),
                    Format('application/x-tar'),
                    Format('application/zip'),
                ]),
            LiteralInput(
                "gbif",
                "GBIF csv file",
                abstract="GBIF table (csv) with tree occurence \
                         (output of 'GBIF data fetch' process )",
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                default=
                'http://localhost:8090/wpsoutputs/flyingpigeon/output_csv-abe15f64-c30d-11e6-bf63-142d277ef1f3.csv'
            ),
            LiteralInput(
                "input_indices",
                "Indices",
                abstract="Climate indices related to growth conditions \
                                    of tree species",
                default=['TG_JJA', 'TNn_Jan'],
                data_type='string',
                min_occurs=1,
                max_occurs=10,
                allowed_values=_SDMINDICES_),
            LiteralInput("period",
                         "Reference period",
                         abstract="Reference period for climate conditions\
                         (all = entire timeseries)",
                         default="all",
                         data_type='string',
                         min_occurs=1,
                         max_occurs=1,
                         allowed_values=[
                             'all', '1951-1980', '1961-1990', '1971-2000',
                             '1981-2010'
                         ]),
            LiteralInput(
                "archive_format",
                "Archive format",
                abstract="Result files will be compressed into archives.\
                                  Choose an appropriate format",
                default="tar",
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                allowed_values=['zip', 'tar'])
        ]
        outputs = [

            # self.output_csv = self.addComplexOutput(
            #     identifier="output_csv",
            #     title="Tree species table",
            #     abstract="Extracted CSV file containing the tree species table",
            #     formats=[{"mimeType": "text/csv"}],
            #     asReference=True,
            #     )
            ComplexOutput(
                "output_gbif",
                "Graphic of GBIF coordinates",
                abstract="PNG graphic file showing the presence of tree species\
                                    according to the CSV file",
                supported_formats=[Format('image/png')],
                as_reference=True,
            ),
            ComplexOutput(
                "output_PA",
                "Graphic of PA mask",
                abstract="PNG graphic file showing PA mask generated based on\
                                    netCDF spatial increment",
                supported_formats=[Format('image/png')],
                as_reference=True,
            ),
            ComplexOutput(
                "output_indices",
                "Climate indices for growth conditions over all timesteps",
                abstract=
                "Archive (tar/zip) containing calculated climate indices",
                supported_formats=[
                    Format('application/x-tar'),
                    Format('application/zip')
                ],
                as_reference=True,
            ),
            ComplexOutput(
                "output_reference",
                "Climate indices for growth conditions of reference period",
                abstract=
                "Archive (tar/zip) containing calculated climate indices",
                supported_formats=[
                    Format('application/x-tar'),
                    Format('application/zip')
                ],
                as_reference=True,
            ),
            ComplexOutput(
                "output_prediction",
                "predicted growth conditions",
                abstract="Archive containing files of the predicted\
                                     growth conditions",
                supported_formats=[
                    Format('application/x-tar'),
                    Format('application/zip')
                ],
                as_reference=True,
            ),
            ComplexOutput(
                "output_info",
                "GAM statistics information",
                abstract="Graphics and information of the learning statistics",
                supported_formats=[Format("application/pdf")],
                as_reference=True,
            ),
            ComplexOutput('output_log',
                          'Logging information',
                          abstract="Collected logs during process run.",
                          as_reference=True,
                          supported_formats=[Format('text/plain')])
        ]

        super(SDMcsvProcess, self).__init__(
            self._handler,
            identifier="sdm_csv",
            title="Species distribution Model (GBIF-CSV table as input)",
            version="0.10",
            metadata=[
                Metadata("LWF", "http://www.lwf.bayern.de/"),
                Metadata(
                    "Doc",
                    "http://flyingpigeon.readthedocs.io/en/latest/descriptions/index.html#species-distribution-model"
                ),
                Metadata("paper",
                         "http://www.hindawi.com/journals/jcli/2013/787250/"),
                Metadata(
                    "Tutorial",
                    "http://flyingpigeon.readthedocs.io/en/latest/tutorials/sdm.html"
                ),
            ],
            abstract="Indices preparation for SDM process",
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
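
Once deployed, a process defined this way can be called from a WPS client; a sketch using OWSLib, assuming a locally running server (the endpoint URL, data links and chosen values are placeholders):

from owslib.wps import WebProcessingService, ComplexDataInput, monitorExecution

wps = WebProcessingService('http://localhost:8093/wps')  # placeholder endpoint

execution = wps.execute(
    'sdm_csv',
    inputs=[
        ('resource', ComplexDataInput('http://example.org/data.nc')),  # placeholder NetCDF
        ('gbif', 'http://example.org/tree_occurrences.csv'),           # placeholder CSV
        ('input_indices', 'TG_JJA'),
        ('input_indices', 'TNn_Jan'),   # repeated key for multiple occurrences
        ('period', '1971-2000'),
        ('archive_format', 'tar'),
    ])
monitorExecution(execution)
for output in execution.processOutputs:
    print(output.identifier, output.reference)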
Example #4
    def __init__(self):
        inputs = [
            ComplexInput(
                "obs",
                "Stream flow observation",
                abstract="Stream flow observation time series.",
                supported_formats=(FORMATS.NETCDF, ),
            ),
            LiteralInput(
                "obs_var",
                "Observation variable name",
                abstract="Name of the variable in the observation dataset.",
                data_type="string",
                default="q_obs",
                min_occurs=0,
                max_occurs=1,
            ),
            ComplexInput(
                "hcst",
                "Stream flow hindcast",
                abstract=
                "Stream flow hindcast time series, deterministic or ensemble.",
                supported_formats=(FORMATS.NETCDF, ),
            ),
            LiteralInput(
                "hcst_var",
                "Hindcast variable name",
                abstract="Name of the variable in the hindcast dataset.",
                data_type="string",
                default="q_sim",
                min_occurs=0,
                max_occurs=1,
            ),
            LiteralInput(
                "skip_nans",
                "Skip NaNs in metric evaluation",
                abstract="Skip NaNs in hindcast evaluation computations",
                data_type="boolean",
                default=True,
                min_occurs=0,
                max_occurs=1,
            ),
            LiteralInput(
                "BSS_threshold",
                "Threshold for Brier Skill Score exceeding given threshold",
                abstract=
                "Threshold for Brier Skill Score exceeding given threshold",
                data_type="float",
                default=0.7,
                min_occurs=0,
                max_occurs=1,
            ),
            LiteralInput(
                "metric",
                "Forecast evaluation metric name",
                abstract=
                "One or multiple hindcast evaluation metric names. If None, defaults to all.",
                data_type="string",
                allowed_values=all_metrics,
                default=None,
                min_occurs=0,
                max_occurs=len(all_metrics),
            ),
        ]

        outputs = [
            ComplexOutput(
                "metrics",
                "Hindcast evaluation metrics values",
                abstract=
                "JSON dictionary of evaluation metrics averaged over the full period and "
                "all members.",
                supported_formats=(FORMATS.JSON, ),
            ),
        ]

        super(HindcastEvaluationProcess, self).__init__(
            self._handler,
            identifier="hindcast-evaluation",
            title=
            "Hindcast evaluation based on the XSkillScore package for deterministic and ensemble hindcasts.",
            version="1.0",
            abstract=
            "This process takes two NetCDF files (one containing the observed and the other the hindcast "
            "data) "
            "and computes hindcast evaluation metrics between them. Metrics are calculated according to "
            "whether there are multiple members in the dataset (probabilistic) or not (deterministic).",
            metadata=[
                Metadata("XSkillScore Documentation",
                         "https://pypi.org/project/xskillscore/")
            ],
            inputs=inputs,
            outputs=outputs,
            keywords=["forecast evaluation", "ensemble", "deterministic"] +
            list(all_metrics),
            status_supported=True,
            store_supported=True,
        )
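
The handler is again omitted; a rough sketch of how these inputs could be consumed with xarray, using a simple stand-in metric rather than the XSkillScore calls the real process relies on:

    def _handler(self, request, response):
        import json
        import xarray as xr

        obs = xr.open_dataset(request.inputs['obs'][0].file)[request.inputs['obs_var'][0].data]
        hcst = xr.open_dataset(request.inputs['hcst'][0].file)[request.inputs['hcst_var'][0].data]
        skip_nans = request.inputs['skip_nans'][0].data

        # Stand-in metric: overall RMSE (the real process dispatches to xskillscore per metric name).
        rmse = float((((hcst - obs) ** 2).mean(skipna=skip_nans)) ** 0.5)

        response.outputs['metrics'].data = json.dumps({'rmse': rmse})
        return response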
Example #5
    def __init__(self):
        inputs = [
            # *model_experiment_ensemble(
            #     models=['Defaults'],
            #     experiments=['historical'],
            #     ensembles=['r1i1p1'],
            #     start_end_year=(1850, 2005),
            #     start_end_defaults=(1850, 2005)),
            *year_ranges((1850, 2005), (1990, 2005)),
            LiteralInput('season',
                         'Season',
                         abstract='Choose a season like DJF.',
                         data_type='string',
                         allowed_values=['DJF', 'DJFM', 'NDJFM', 'JJA'],
                         default='JJA'),
            LiteralInput('area',
                         'Area',
                         abstract='Area',
                         data_type='string',
                         allowed_values=['EU', 'EAT', 'PNA', 'NH'],
                         default='EU'),
            LiteralInput('extreme',
                         'Extreme',
                         abstract='Extreme',
                         data_type='string',
                         allowed_values=[
                             '60th_percentile', '75th_percentile',
                             '90th_percentile', 'mean', 'maximum', 'std',
                             'trend'
                         ],
                         default='75th_percentile'),
            LiteralInput('numclus',
                         'Number of Clusters',
                         abstract='Number of clusters',
                         data_type='string',
                         allowed_values=['2', '3', '4'],
                         default='3'),
            LiteralInput('perc',
                         'Percentage',
                         abstract='Percentage of total Variance',
                         data_type='string',
                         allowed_values=['70', '80', '90'],
                         default='80'),
        ]
        outputs = [
            ComplexOutput(
                'plot',
                'Output plot',
                abstract='Generated output plot of ESMValTool processing.',
                as_reference=True,
                supported_formats=[Format('image/eps')]),
            ComplexOutput(
                'ens_extreme',
                'ens_extreme',
                abstract='Generated output data of ESMValTool processing.',
                as_reference=True,
                supported_formats=[FORMATS.NETCDF]),
            ComplexOutput(
                'ens_climatologies',
                'ens_climatologies',
                abstract='Generated output data of ESMValTool processing.',
                as_reference=True,
                supported_formats=[FORMATS.NETCDF]),
            ComplexOutput(
                'ens_anomalies',
                'ens_anomalies',
                abstract='Generated output data of ESMValTool processing.',
                as_reference=True,
                supported_formats=[FORMATS.NETCDF]),
            ComplexOutput('statistics',
                          'Statistics',
                          abstract='Clustering statistics',
                          as_reference=True,
                          supported_formats=[Format('text/plain')]),
            ComplexOutput(
                'archive',
                'Archive',
                abstract=
                'The complete output of the ESMValTool processing as a zip archive.',
                as_reference=True,
                supported_formats=[Format('application/zip')]),
            *default_outputs(),
        ]

        super(EnsClus, self).__init__(
            self._handler,
            identifier="ensclus",
            title="EnsClus - Ensemble Clustering",
            version=runner.VERSION,
            abstract="""Cluster analysis tool based on the k-means algorithm
                for ensembles of climate model simulations. EnsClus group
                ensemble members according to similar characteristics and
                select the most representative member for each cluster.
                Currently included are the models: ACCESS1-0, ACCESS1-3,
                CanESM2, CCSM4, CESM1-BGC""",
            metadata=[
                Metadata('Estimated Calculation Time', '4 minutes'),
                Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
                Metadata(
                    'Documentation',
                    'https://esmvaltool.readthedocs.io/en/latest/recipes/recipe_ensclus.html',
                    role=util.WPS_ROLE_DOC),
                #                Metadata(
                #                    'Media',
                #                    util.diagdata_url() + '/ensclus/ensclus_thumbnail.png',
                #                    role=util.WPS_ROLE_MEDIA),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
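
Note that numclus and perc are declared as strings above, so a handler has to convert them explicitly; a minimal sketch (the ESMValTool recipe construction itself is omitted):

    def _handler(self, request, response):
        season = request.inputs['season'][0].data
        numclus = int(request.inputs['numclus'][0].data)   # declared as a string input
        perc = int(request.inputs['perc'][0].data)         # declared as a string input
        # ... build and run the ESMValTool recipe with these options, then fill the outputs ...
        return response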
Example #6
    def __init__(self):
        inputs = [
            LiteralInput(
                "indices",
                "Earth Observation Product Indice",
                abstract="Choose an indice based on Earth Observation Data",
                default='NDVI',
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                allowed_values=['NDVI', 'BAI']),
            LiteralInput(
                'BBox',
                'Bounding Box',
                data_type='string',
                abstract="Enter a bbox: min_lon, max_lon, min_lat, max_lat."
                " min_lon=Western longitude,"
                " max_lon=Eastern longitude,"
                " min_lat=Southern or northern latitude,"
                " max_lat=Northern or southern latitude."
                " For example: -80,50,20,70",
                min_occurs=1,
                max_occurs=1,
                default='14,15,8,9',
            ),
            LiteralInput(
                'start',
                'Start Date',
                data_type='date',
                abstract='First day of the period to be searched for EO data '
                '(if not set, 30 days before the end of the period will be selected).',
                default=(dt.now() - timedelta(days=30)).strftime('%Y-%m-%d'),
                min_occurs=0,
                max_occurs=1,
            ),
            LiteralInput(
                'end',
                'End Date',
                data_type='date',
                abstract='Last day of the period to be searched for EO data '
                '(if not set, the current day is used).',
                default=dt.now().strftime('%Y-%m-%d'),
                min_occurs=0,
                max_occurs=1,
            ),
            LiteralInput(
                'cloud_cover',
                'Cloud Cover',
                data_type='integer',
                abstract='Maximum tolerated percentage of cloud cover',
                default="30",
                allowed_values=[0, 10, 20, 30, 40, 50, 60, 70, 80, 100]),
            LiteralInput(
                'username',
                'User Name',
                data_type='string',
                abstract=
                'Authentication user name for the COPERNICUS Sci-hub',
                # default='2013-12-31',
                min_occurs=1,
                max_occurs=1,
            ),
            LiteralInput(
                'password',
                'Password',
                data_type='string',
                abstract=
                'Authentication password for the COPERNICUS Sci-hub',
                min_occurs=1,
                max_occurs=1,
            ),
        ]

        outputs = [
            # ComplexOutput("output_txt", "Files search result",
            #               abstract="Files found according to the search querry",
            #               supported_formats=[Format('text/plain')],
            #               as_reference=True,
            #               ),
            ComplexOutput(
                "output_plot",
                "NDVI example file",
                abstract="Plots in RGB colors",
                supported_formats=[Format('image/png')],
                as_reference=True,
            ),
            ComplexOutput(
                "output_archive",
                "Tar archive",
                abstract="Tar archive of the iamge files",
                supported_formats=[Format("application/x-tar")],
                as_reference=True,
            ),
            ComplexOutput(
                "output_log",
                "Logging information",
                abstract="Collected logs during process run.",
                supported_formats=[Format("text/plain")],
                as_reference=True,
            )
        ]

        super(EO_COP_indicesProcess, self).__init__(
            self._handler,
            identifier="EO_COPERNICUS_indices",
            title="EO indices",
            version="0.1",
            abstract="Derivateing indices like NDVI based on",
            metadata=[
                Metadata('Documentation',
                         'http://flyingpigeon.readthedocs.io/en/latest/'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
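
The bounding box arrives as a single comma-separated string, so a handler has to split and validate it; a standalone sketch (parse_bbox is an illustrative helper, not part of the process):

def parse_bbox(bbox_string):
    # '14,15,8,9' -> (min_lon, max_lon, min_lat, max_lat) as floats
    parts = [float(p) for p in bbox_string.split(',')]
    if len(parts) != 4:
        raise ValueError('BBox needs exactly four values: min_lon, max_lon, min_lat, max_lat')
    min_lon, max_lon, min_lat, max_lat = parts
    if not all(-180 <= lon <= 180 for lon in (min_lon, max_lon)):
        raise ValueError('longitudes must be within [-180, 180]')
    if not all(-90 <= lat <= 90 for lat in (min_lat, max_lat)):
        raise ValueError('latitudes must be within [-90, 90]')
    return min_lon, max_lon, min_lat, max_lat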
Example #7
__author__ = 'Jachym Cepicky'

from pywps import Process, LiteralInput, ComplexOutput, ComplexInput, Format
from pywps.app.Common import Metadata
from pywps.validator.mode import MODE
from pywps.inout.formats import FORMATS

inpt_vector = ComplexInput(
    'vector',
    'Vector map',
    supported_formats=[Format('application/gml+xml')],
    mode=MODE.STRICT
)

inpt_size = LiteralInput('size', 'Buffer size', data_type='float')

out_output = ComplexOutput(
    'output',
    'HelloWorld Output',
    supported_formats=[Format('application/gml+xml')]
)

inputs = [inpt_vector, inpt_size]
outputs = [out_output]
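
The class below hands a module-level _handler to the parent constructor, but the snippet ends before defining it; a minimal placeholder, assuming only that the handler receives the usual PyWPS request and response objects (the real demo buffers the vector by the given size rather than copying it through):

def _handler(request, response):
    # Placeholder only: a real implementation would buffer the 'vector' geometry
    # by request.inputs['size'][0].data instead of passing it through unchanged.
    response.outputs['output'].data = request.inputs['vector'][0].data
    return response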

class DemoBuffer(Process):
    def __init__(self):

        super(DemoBuffer, self).__init__(
            _handler,
Example #8
                                    SNOW_SWI_MAX=0.3,
                                    SWI_REDUCT_COEFF=0.1,
                                    DD_REFREEZE_TEMP=2.0,
                                    REFREEZE_FACTOR=5.0,
                                    REFREEZE_EXP=1.0,
                                    PET_CORRECTION=3.0,
                                    HMETS_RUNOFF_COEFF=1.0,
                                    PERC_COEFF=0.02,
                                    BASEFLOW_COEFF_1=0.1,
                                    BASEFLOW_COEFF_2=0.01,
                                    TOPSOIL=0.5,
                                    PHREATIC=2.0)

upperBounds = LiteralInput('upperBounds', 'Comma-separated list of model parameter upper bounds',
                           abstract='UParameters: ' + ', '.join(Uparams_defaults._fields),
                           data_type='string',
                           default=', '.join(str(p) for p in list(Uparams_defaults)),
                           min_occurs=0)

lowerBounds = LiteralInput('lowerBounds', 'Comma-separated list of model parameter lower bounds',
                           abstract='LParameters: ' + ', '.join(Lparams_defaults._fields),
                           data_type='string',
                           default=', '.join(str(p) for p in list(Lparams_defaults)),
                           min_occurs=0)


class OstrichHMETSProcess(OstrichProcess):
    """
    OSTRICH emulator for the HMETS model.

    This process calibrates the HMETS model using an OSTRICH emulator. Users need to provide netCDF input