Example no. 1
    def test_literal_allowed_values_input(self):
        """An integer literal input mixing plain values and ranges must
        describe itself with a single ows:AllowedValues container that
        holds both the values and the ranges."""
        inp = LiteralInput(
            'foo',
            'Foo',
            data_type='integer',
            uoms=['metre'],
            allowed_values=(
                1, 2, (5, 10), (12, 4, 24),
                AllowedValue(
                    allowed_type=ALLOWEDVALUETYPE.RANGE,
                    minval=30,
                    maxval=33,
                    range_closure='closed-open')
            )
        )
        xml = inp.describe_xml()

        containers = xpath_ns(xml, './LiteralData/ows:AllowedValues')
        self.assertEqual(len(containers), 1)

        # 1 and 2 become ows:Value; the two tuples and the explicit
        # AllowedValue become ows:Range elements.
        container = containers[0]
        self.assertEqual(len(xpath_ns(container, './ows:Value')), 2)
        self.assertEqual(len(xpath_ns(container, './ows:Range')), 3)
Example no. 2
 def test_literal_integer_input(self):
     """An integer literal input is described with its identifier, the
     'integer' data type and the XMLSchema type reference."""
     inp = LiteralInput('foo', 'Literal foo', data_type='integer')
     xml = inp.describe_xml()
     assert xml.tag == E.Input().tag
     [ident_el] = xpath_ns(xml, './ows:Identifier')
     [dtype_el] = xpath_ns(xml, './LiteralData/ows:DataType')
     assert ident_el.text == 'foo'
     assert dtype_el.text == 'integer'
     assert dtype_el.attrib['reference'] == XMLSCHEMA_2 + 'integer'
Example no. 3
 def test_literal_integer_input(self):
     """A positiveInteger literal with a uom describes itself with the
     OGC data-type reference and an ows:AnyValue element."""
     inp = LiteralInput("foo", "Literal foo", data_type="positiveInteger", uoms=["metre"])
     xml = inp.describe_xml()
     assert xml.tag == E.Input().tag
     [ident_el] = xpath_ns(xml, "./ows:Identifier")
     assert ident_el.text == "foo"
     [dtype_el] = xpath_ns(xml, "./LiteralData/ows:DataType")
     assert dtype_el.text == "positiveInteger"
     ref_attr = "{%s}reference" % NAMESPACES["ows"]
     assert dtype_el.attrib[ref_attr] == OGCTYPE["positiveInteger"]
     # With no allowed_values given, the input accepts any value.
     assert len(xpath_ns(xml, "./LiteralData/ows:AnyValue")) == 1
Example no. 4
 def test_literal_integer_input(self):
     """A positiveInteger literal with a uom must expose the identifier,
     the OGC data-type reference and an AnyValue in its description."""
     inp = LiteralInput('foo', 'Literal foo', data_type='positiveInteger', uoms=['metre'])
     xml = inp.describe_xml()
     self.assertEqual(xml.tag, E.Input().tag)
     [ident_el] = xpath_ns(xml, './ows:Identifier')
     self.assertEqual(ident_el.text, 'foo')
     [dtype_el] = xpath_ns(xml, './LiteralData/ows:DataType')
     self.assertEqual(dtype_el.text, 'positiveInteger')
     ref_attr = '{%s}reference' % NAMESPACES['ows']
     self.assertEqual(dtype_el.attrib[ref_attr], OGCTYPE['positiveInteger'])
     # With no allowed_values given, the input accepts any value.
     self.assertEqual(len(xpath_ns(xml, './LiteralData/ows:AnyValue')), 1)
    def __init__(self):
        """Declare inputs/outputs and register the analogue-search process.

        Builds the literal inputs controlling the circulation-analogue
        search on reanalysis data (dataset choice, bounding box, analysis
        and reference periods, distance metric, tuning parameters) and
        the complex outputs produced by a run, then registers everything
        with the parent Process class.
        """
        inputs = [
            # Which reanalysis dataset to search for analogue days.
            LiteralInput("reanalyses",
                         "Reanalyses Data",
                         abstract="Choose a reanalyses dataset for comparison",
                         default="NCEP_slp",
                         data_type='string',
                         min_occurs=1,
                         max_occurs=1,
                         allowed_values=_PRESSUREDATA_),
            LiteralInput(
                "timeres",
                "Reanalyses temporal resolution",
                abstract=
                "Temporal resolution of the reanalyses (only for 20CRV2)",
                default="day",
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                allowed_values=['day', '6h']),
            # Spatial domain, passed as a plain comma-separated string
            # rather than a WPS BoundingBox input.
            LiteralInput(
                'BBox',
                'Bounding Box',
                data_type='string',
                abstract="Enter a bbox: min_lon, max_lon, min_lat, max_lat."
                " min_lon=Western longitude,"
                " max_lon=Eastern longitude,"
                " min_lat=Southern or northern latitude,"
                " max_lat=Northern or southern latitude."
                " For example: -80,50,20,70",
                min_occurs=1,
                max_occurs=1,
                default='-80,50,20,70',
            ),
            # Analysis period: the days for which analogues are searched.
            LiteralInput(
                'dateSt',
                'Start date of analysis period',
                data_type='date',
                abstract='First day of the period to be analysed',
                default='2013-07-15',
                min_occurs=1,
                max_occurs=1,
            ),
            LiteralInput(
                'dateEn',
                'End date of analysis period',
                data_type='date',
                abstract='Last day of the period to be analysed',
                default='2013-12-31',
                min_occurs=1,
                max_occurs=1,
            ),
            # Reference period: the pool of days analogues are picked from.
            LiteralInput(
                'refSt',
                'Start date of reference period',
                data_type='date',
                abstract='First day of the period where analogues being picked',
                default='2013-01-01',
                min_occurs=1,
                max_occurs=1,
            ),
            LiteralInput(
                'refEn',
                'End date of reference period',
                data_type='date',
                abstract='Last day of the period where analogues being picked',
                default='2014-12-31',
                min_occurs=1,
                max_occurs=1,
            ),
            # Algorithm tuning parameters.
            LiteralInput("normalize",
                         "normalization",
                         abstract="Normalize by subtraction of annual cycle",
                         default='base',
                         data_type='string',
                         min_occurs=1,
                         max_occurs=1,
                         allowed_values=['None', 'base', 'sim', 'own']),
            LiteralInput(
                "seasonwin",
                "Seasonal window",
                abstract=
                "Number of days before and after the date to be analysed",
                default='30',
                data_type='integer',
                min_occurs=0,
                max_occurs=1,
            ),
            LiteralInput(
                "nanalog",
                "Nr of analogues",
                abstract="Number of analogues to be detected",
                default='20',
                data_type='integer',
                min_occurs=0,
                max_occurs=1,
            ),
            LiteralInput(
                "dist",
                "Distance",
                abstract="Distance function to define analogues",
                default='euclidean',
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                allowed_values=['euclidean', 'mahalanobis', 'cosine', 'of']),
            LiteralInput(
                "outformat",
                "output file format",
                abstract="Choose the format for the analogue output file",
                default="ascii",
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                allowed_values=['ascii', 'netCDF4']),
            LiteralInput(
                "timewin",
                "Time window",
                abstract=
                "Number of days following the analogue day the distance will be averaged",
                default='1',
                data_type='integer',
                min_occurs=0,
                max_occurs=1,
            ),
        ]

        # Files produced by the analogue computation and its post-processing.
        outputs = [
            ComplexOutput(
                "config",
                "Config File",
                abstract="Config file used for the Fortran process",
                supported_formats=[Format("text/plain")],
                as_reference=True,
            ),
            ComplexOutput(
                "analogs",
                "Analogues File",
                abstract="mulit-column text file",
                supported_formats=[Format("text/plain")],
                as_reference=True,
            ),
            ComplexOutput(
                "formated_analogs",
                "Formated Analogues File",
                abstract="Formated analogues file for viewer",
                supported_formats=[Format("text/plain")],
                as_reference=True,
            ),
            ComplexOutput(
                'output_netcdf',
                'Subsets for one dataset',
                abstract=
                "Prepared netCDF file as input for weatherregime calculation",
                as_reference=True,
                supported_formats=[Format('application/x-netcdf')]),
            ComplexOutput(
                "output",
                "Analogues Viewer html page",
                abstract="Interactive visualization of calculated analogues",
                supported_formats=[Format("text/html")],
                as_reference=True,
            ),
            ComplexOutput('output_log',
                          'Logging information',
                          abstract="Collected logs during process run.",
                          as_reference=True,
                          supported_formats=[Format('text/plain')]),
        ]

        # Register the process definition with PyWPS.
        super(AnalogsreanalyseProcess, self).__init__(
            self._handler,
            identifier="analogs_reanalyse",
            title="Analogues of circulation (based on reanalyses data)",
            abstract=
            'Search for days with analogue pressure pattern for reanalyses data sets',
            version="0.10",
            metadata=[
                Metadata('LSCE', 'http://www.lsce.ipsl.fr/en/index.php'),
                Metadata('Doc',
                         'http://flyingpigeon.readthedocs.io/en/latest/'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
Example no. 6
    def __init__(self):
        """Declare inputs/outputs and register the 'nr102' WPS process.

        All inputs are plain string URIs or JSON blobs describing the
        Docker image to run, the IaaS resources to run it on, and the
        Radarsat-2 data/graph used for processing; credentials are
        deliberately kept out of the interface (see inline comments).
        """
        inputs = [
            LiteralInput(
                'docker_image',
                title='URI of the Docker Image to be deployed and executed',
                abstract=
                'The URI contains the full path to a Docker image as used by Docker Daemon, including the host, port, path, image name and version.',  # This input parameter does not support credentials. Credentials for private Docker registries are set as a system configuration. The credentials are injected in the environment variables of the VM instance that runs the Docker Image',
                data_type='string'),
            # Allowed values and default come from the system-wide
            # GetCloudParams configuration, serialized to JSON strings.
            LiteralInput(
                'IaaS_deploy_execute',
                title=
                'Json of the IaaS resource where the job will be deployed and executed ()',
                abstract=
                'Json formatted description of a broker url and the queue name where to send the task',  # is the URI of the Message Broker in the form of amqp://broker_ip:broker_port//. The second part is the Task Queue name. For simplicity, both part are appended in a single string. This input parameter does not support credentials. Credentials for Message brokers are set as a system configuration. The credentials are injected in the environment variables of the VM instance that will host the WPS Server',
                allowed_values=[
                    json.dumps(broker_queue)
                    for broker_queue in GetCloudParams.broker_queue_list
                ],
                default=json.dumps(GetCloudParams.broker_queue_list[0]),
                data_type='string'),
            LiteralInput(
                'IaaS_datastore',
                title=
                'URI of an IaaS data store where the outputs will be stored',
                abstract=
                'This parameter sets the target for all outputs of the process (HTTPS fileserver, AWS S3, SWIFT, Globus, etc.)...',  # Outputs will be staged out in the datastore by the process. The current implementation only supports HTTP fileservers. This input parameter does not support credentials. Credentials for datastores are set as a system configuration. The credentials are injected in the environment variables of the VM instance that runs the Docker Image',
                data_type='string'),
            LiteralInput(
                'Radarsat2_data',
                title='URI from where to download the Radarsat-2 data ZIP',
                abstract=
                'The Radarsat-2 file is unzipped in the local drive. The product.xml file is located, ...',  # then provided to RSTB/SNAP.',# The long string identifying the product is used to create temporary directories and to format the output file names',
                data_type='string'),
            LiteralInput(
                'WMS_server',
                title='URI where to register a WMS-compatible output',
                abstract=
                'The process produces an RGB image of the data output. Its smaller footprint is better managed ...',  # by WMS/WCS servers. The RGB output will be staged out in the specified WMS Server by the process. The output parameter named output_data_WMS_url will contain an URL pointing to this WMS server. The credentials are injected in the environment variables of the VM instance that runs the Docker Image.',
                data_type='string'),
            LiteralInput(
                'input_graph_url',
                title=
                'URL where to download the GPT graph used to process the Radarsat-2 data',
                abstract=
                'Allows a user to provide a different processing graph to the process. ',  # In case none is specified, a default graph stored in the application package is used. A graph provided here should present the same Inputs (reads) and Ouputs (writes) as the default graph',
                data_type='string'),
            LiteralInput(
                'input_graph_parameters',
                title='KVP used to parametrize the graph itself',
                abstract=
                'Allows a user to provide customized parameters to the graph in the form of a JSON file.',  # In case none are specified, default values will be used. Currently, the default graph supports Polarimetric-Speckle-Filter.filter, Polarimetric-Speckle-Filter.windowSize and Polarimetric-Speckle-Filter.numLooksStr',
                data_type='string'),
        ]
        outputs = [
            LiteralOutput(
                'output_data_url',
                title='URL to data produced by the process',
                abstract=
                'The URL provided here is dependent on the IaaS_datastore selected. It allows an user to access and download from the Cloud the image data produced by the process. By default in the current implementation, the output data is accessible through HTTP fileservers.',
                data_type='string'),
            LiteralOutput(
                'output_data_WMS_url',
                title='URL to WMS layer for the data produced',
                abstract=
                'The URL provided here is dependent on the WMS_server provided. If no WMS server was specified, this field is left blank. The URL allows an user to access the image data produced by the process in WMS client.',
                data_type='string'),
        ]

        # Register the process definition with PyWPS.
        super(Nr102, self).__init__(
            self._handler,
            identifier='nr102',
            abstract=
            'This Web Processing Service (WPS) was developped as a deliverable for OGC Testbed 13 Earth Observation Cloud (EOC) Thread. It aims to meet Natural Resources Canada specifications for hybrid cloud architectures in forestry applications.',
            title='Cloud WPS Biomass with WCS/WMS support 2',
            version='0.1',
            inputs=inputs,
            outputs=outputs,
            store_supported=True,
            status_supported=True)
Example no. 7
    def __init__(self):
        """Declare inputs/outputs and register the daily-percentile
        climate-indices process.

        Takes netCDF resources (or tar/zip archives of them), a target
        percentile, a time grouping and an optional country region, and
        produces masked/subsetted netCDF results plus a run log.
        """
        inputs = [
            ComplexInput(
                'resource',
                'Resource',
                abstract=
                "NetCDF Files or archive (tar/zip) containing netCDF files",
                min_occurs=1,
                max_occurs=1000,
                #  maxmegabites=5000,
                supported_formats=[
                    Format('application/x-netcdf'),
                    Format('application/x-tar'),
                    Format('application/zip'),
                ]),

            # LiteralInput("indices", "Index",
            #              abstract='Select an index',
            #              default='TG',
            #              data_type='string',
            #              min_occurs=1,
            #              max_occurs=1,  # len(indices()),
            #              allowed_values=['TG', 'TN', 'TX'],  # indices()
            #              ),
            LiteralInput(
                "percentile",
                "Percentile",
                abstract='Select an percentile',
                default='90',
                data_type='integer',
                min_occurs=1,
                max_occurs=1,  # len(indices()),
                # NOTE(review): range(1, 100) allows 1..99 only — the 100th
                # percentile is excluded; confirm that is intended.
                allowed_values=range(1, 100),  # indices()
            ),

            # LiteralInput("refperiod", "Reference Period",
            #              abstract="Time refperiod to retrieve the percentile level",
            #              default="19700101-20101231",
            #              data_type='string',
            #              min_occurs=0,
            #              max_occurs=1,
            #              ),
            #
            # self.refperiod = self.addLiteralInput(
            #     identifier="refperiod",
            #     title="Reference refperiod",
            #     abstract="Reference refperiod for climate condition (all = entire timeserie)",
            #     default=None,
            #     type=type(''),
            #     minOccurs=0,
            #     maxOccurs=1,
            #     allowedValues=['all','1951-1980', '1961-1990', '1971-2000','1981-2010']
            #     )
            LiteralInput("grouping",
                         "Grouping",
                         abstract="Select an time grouping (time aggregation)",
                         default='yr',
                         data_type='string',
                         min_occurs=1,
                         max_occurs=1,
                         allowed_values=GROUPING),
            LiteralInput(
                'region',
                'Region',
                data_type='string',
                # abstract= countries_longname(), # need to handle special non-ascii char in countries.
                abstract="Country ISO-3166-3:\
                          https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3#Officially_assigned_code_elements",
                min_occurs=0,
                max_occurs=len(countries()),
                allowed_values=countries()),  # REGION_EUROPE #COUNTRIES
            LiteralInput(
                "mosaic",
                "Mosaic",
                abstract=
                "If Mosaic is checked, selected polygons be clipped as a mosaic for each input file",
                default='0',
                data_type='boolean',
                min_occurs=0,
                max_occurs=1,
            ),
        ]

        outputs = [
            ComplexOutput(
                "output_archive",
                "Masked Files Archive",
                abstract="Tar file of the masked netCDF files",
                supported_formats=[Format("application/x-tar")],
                as_reference=True,
            ),
            ComplexOutput('ncout',
                          'Subsets for one dataset',
                          abstract="NetCDF file with subsets of one dataset.",
                          as_reference=True,
                          supported_formats=[Format('application/x-netcdf')]),
            ComplexOutput('output_log',
                          'Logging information',
                          abstract="Collected logs during process run.",
                          as_reference=True,
                          supported_formats=[Format("text/plain")])
        ]

        # Register the process definition with PyWPS.
        super(IndicespercentiledaysProcess, self).__init__(
            self._handler,
            identifier="indices_percentiledays",
            title="Climate indices (Daily percentiles)",
            version="0.10",
            abstract="Climate indices based on one single input variable\
             Calculating the percentiles for each day in the year.",
            metadata=[
                {
                    'title':
                    'Doc',
                    'href':
                    'http://flyingpigeon.readthedocs.io/en/latest/descriptions/\
                 index.html#climate-indices'
                },
                {
                    "title": "ICCLIM",
                    "href": "http://icclim.readthedocs.io/en/latest/"
                },
                {
                    "title":
                    "Percentile-based indices",
                    "href":
                    "http://flyingpigeon.readthedocs.io/en/\
                latest/descriptions/indices.html#percentile-based-indices"
                },
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
Example no. 8
    def __init__(self):
        """Declare inputs/outputs and register the 'EO_COP_fetch' process.

        Fetches Copernicus (Sentinel-2) Earth-observation products for a
        bounding box and date range from the COPERNICUS Sci-hub, using
        the credentials supplied by the caller, and returns a file
        listing the local paths of the downloaded products plus a log.
        """
        inputs = [
            LiteralInput("products", "Earth Observation Product Type",
                         abstract="Choose Earth Observation Products",
                         default='Sentinel-2',
                         data_type='string',
                         min_occurs=1,
                         max_occurs=1,
                         allowed_values=['Sentinel-2']
                         ),

            # Spatial domain, passed as a comma-separated string rather
            # than a WPS BoundingBox input.
            LiteralInput('BBox', 'Bounding Box',
                         data_type='string',
                         abstract="Enter a bbox: min_lon, max_lon, min_lat, max_lat."
                                  " min_lon=Western longitude,"
                                  " max_lon=Eastern longitude,"
                                  " min_lat=Southern or northern latitude,"
                                  " max_lat=Northern or southern latitude."
                                  " For example: -80,50,20,70",
                         min_occurs=1,
                         max_occurs=1,
                         default='14,15,8,9',
                         ),

            # Defaults give a rolling 30-day window ending today.
            LiteralInput('start', 'Start Date',
                         data_type='date',
                         # Fixed typo ("befor") and unbalanced parenthesis in
                         # the user-facing abstract.
                         abstract='First day of the period to be searched for EO data.'
                                  '(if not set, 30 days before end of period will be selected.)',
                         default=(dt.now() - timedelta(days=30)).strftime('%Y-%m-%d'),
                         min_occurs=0,
                         max_occurs=1,
                         ),

            LiteralInput('end', 'End Date',
                         data_type='date',
                         abstract='Last day of the period to be searched for EO data.'
                                  '(if not set, current day is set.)',
                         default=dt.now().strftime('%Y-%m-%d'),
                         min_occurs=0,
                         max_occurs=1,
                         ),

            LiteralInput('cloud_cover', 'Cloud Cover',
                         data_type='integer',
                         abstract='Max tolerated percentage of cloud cover',
                         default="30",
                         # 90 was missing from the 0-100-by-10 progression.
                         allowed_values=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
                         ),

            LiteralInput('username', 'User Name',
                         data_type='string',
                         abstract='Authentication user name for the COPERNICUS Sci-hub',
                         # default='2013-12-31',
                         min_occurs=1,
                         max_occurs=1,
                         ),

            LiteralInput('password', 'Password',
                         data_type='string',
                         abstract='Authentication password for the COPERNICUS Sci-hub',
                         min_occurs=1,
                         max_occurs=1,
                         ),
        ]

        outputs = [
            ComplexOutput("output", "Fetched Files",
                          abstract="File containing the local paths to downloaded files.",
                          supported_formats=[Format('text/plain')],
                          as_reference=True,
                          ),

            ComplexOutput("output_log", "Logging information",
                          abstract="Collected logs during process run.",
                          supported_formats=[Format("text/plain")],
                          as_reference=True,
                          )
        ]

        # Register the process definition with PyWPS.
        super(EO_COP_fetchProcess, self).__init__(
            self._handler,
            identifier="EO_COP_fetch",
            title="EO COPERNICUS Fetch Resources",
            version="0.1",
            abstract="Fetch EO Data to the local file"
                     "system of the birdhouse compute provider.",
            metadata=[
                Metadata('Documentation', 'http://flyingpigeon.readthedocs.io/en/latest/'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
Example no. 9
    def __init__(self):
        """Declare inputs/outputs and register the SDM indices process.

        Computes climate indices relevant to species-distribution
        modelling from netCDF resources and returns them as a tar/zip
        archive plus a subset netCDF and a run log.
        """
        inputs = [
            ComplexInput(
                'resource',
                'Resource',
                abstract=
                'NetCDF Files or archive (tar/zip) containing netCDF files.',
                metadata=[Metadata('Info')],
                min_occurs=1,
                max_occurs=1000,
                supported_formats=[
                    Format('application/x-netcdf'),
                    Format('application/x-tar'),
                    Format('application/zip'),
                ]),
            # Up to 10 indices may be requested in one run.
            LiteralInput(
                "indices",
                "Indices",
                abstract="Climate indices related to growth conditions \
                                    of tree species",
                default='TG_JJA',
                data_type='string',
                min_occurs=1,
                max_occurs=10,
                allowed_values=_SDMINDICES_),
            LiteralInput(
                "archive_format",
                "Archive format",
                abstract="Result files will be compressed into archives. \
                                   Choose an appropriate format",
                default="tar",
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                allowed_values=['zip', 'tar'])
        ]

        outputs = [
            ComplexOutput(
                "output_indices",
                "Climate indices for growth conditions over all timesteps",
                abstract=
                "Archive (tar/zip) containing calculated climate indices",
                supported_formats=[
                    Format('application/x-tar'),
                    Format('application/zip')
                ],
                as_reference=True,
            ),
            ComplexOutput('ncout',
                          'Subsets for one resource',
                          abstract="NetCDF file with subsets of one resource.",
                          as_reference=True,
                          supported_formats=[Format('application/x-netcdf')]),
            ComplexOutput('output_log',
                          'Logging information',
                          abstract="Collected logs during process run.",
                          as_reference=True,
                          supported_formats=[Format('text/plain')])
        ]

        # Register the process definition with PyWPS.
        super(SDMgetindicesProcess, self).__init__(
            self._handler,
            identifier="sdm_getindices",
            title="Species distribution Model (only indices calculation )",
            version="0.10",
            metadata=[
                Metadata("LWF", "http://www.lwf.bayern.de/"),
                Metadata(
                    "Doc",
                    "http://flyingpigeon.readthedocs.io/en/latest/descriptions/index.html#species-distribution-model"
                ),
                Metadata("paper",
                         "http://www.hindawi.com/journals/jcli/2013/787250/"),
                Metadata(
                    "Tutorial",
                    "http://flyingpigeon.readthedocs.io/en/latest/tutorials/sdm.html"
                ),
            ],
            abstract="Indices preparation for SDM process",
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
Example no. 10
    def __init__(self):
        """Declare inputs/outputs and register the geoseries-to-netCDF process.

        Converts a geospatial (GeoJSON) series into a two-dimensional,
        CF-compliant netCDF, using one column as the index dimension and
        optionally another as the feature dimension.
        """
        inputs = [
            ComplexInput(
                "resource",
                "Geospatial series",
                abstract=
                "Series of geographical features, or an url which requests such a series (ex: OGC-API)",
                supported_formats=[FORMATS.GEOJSON],
                min_occurs=1,
                max_occurs=1,
            ),
            LiteralInput(
                "index_dim",
                "Index dimension",
                abstract=
                "Name of the column in the data to be converted into the index dimension.",
                data_type="string",
                min_occurs=1,
                max_occurs=1,
            ),
            # Optional; an empty default means "no feature dimension given".
            LiteralInput(
                "feat_dim",
                "Feature dimension",
                abstract=
                ("Name of a column in the data to be used as the coordinate and of "
                 " the 'feature' dimension. Each geometry must be different along and only along this dimension."
                 ),
                data_type="string",
                default="",
                min_occurs=0,
                max_occurs=1),
            LiteralInput(
                "squeeze",
                "Squeeze variables",
                abstract=
                "Squeeze variables that are replicated along one of the dimensions.",
                data_type="boolean",
                default=False,
                min_occurs=0,
                max_occurs=1,
            ),
            # LiteralInput(
            #     "grid_mapping",
            #     "Grid mapping",
            #     abstract="Name of the grid mapping of the data, only longitude_latitude supported.",
            #     data_type="string",
            #     default="longitude_latitude",
            #     min_occurs=0,
            #     max_occurs=1,
            # ),
            wpsio.output_name
        ]

        outputs = [
            ComplexOutput(
                "output",
                "Geospatial series as netCDF",
                # Fixed wording of the user-facing abstract.
                abstract="The geospatial series as a 2-dimensional netCDF.",
                as_reference=True,
                supported_formats=[FORMATS.NETCDF],
            ),
            wpsio.output_log,
        ]

        super().__init__(
            self._handler,
            identifier="geoseries_to_netcdf",
            version="0.1",
            title="Convert a geospatial series to a CF-compliant netCDF.",
            # Fixed typos in the user-facing abstract ("netCDf", "convetions",
            # "a features dimensions").
            abstract=(
                "Reshapes a geospatial series with a features dimension and "
                "converts it into the netCDF format, following CF conventions."),
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )

        # Progress percentages reported to the client while the handler runs.
        self.status_percentage_steps = {
            "start": 5,
            "downloaded": 25,
            "converted": 75,
            "done": 99,
        }
Example no. 11
    def __init__(self):
        """Declare the WPS interface of the analogues-of-circulation process
        (based on climate model data) and register it with pywps.

        The process searches, for each day of the analysis period, the days
        of the reference period with the most similar pressure pattern.
        """
        inputs = [
            # One or more netCDF resources, optionally bundled in a tar/zip.
            ComplexInput(
                'resource',
                'Resource',
                abstract='NetCDF Files or archive (tar/zip) containing netCDF files.',
                metadata=[Metadata('Info')],
                min_occurs=1,
                max_occurs=1000,
                supported_formats=[
                    Format('application/x-netcdf'),
                    Format('application/x-tar'),
                    Format('application/zip'),
                ]),
            # Analysis period: the days for which analogues are searched.
            LiteralInput(
                'dateSt',
                'Start date of analysis period',
                data_type='date',
                abstract='First day of the period to be analysed',
                default='2013-07-15',
                min_occurs=1,
                max_occurs=1,
            ),
            LiteralInput(
                'dateEn',
                'End date of analysis period',
                data_type='date',
                abstract='Last day of the period to be analysed',
                default='2013-12-31',
                min_occurs=1,
                max_occurs=1,
            ),
            # Reference period: the pool of candidate analogue days.
            LiteralInput(
                'refSt',
                'Start date of reference period',
                data_type='date',
                abstract='First day of the period where analogues being picked',
                default='2013-01-01',
                min_occurs=1,
                max_occurs=1,
            ),
            LiteralInput(
                'refEn',
                'End date of reference period',
                data_type='date',
                abstract='Last day of the period where analogues being picked',
                default='2014-12-31',
                min_occurs=1,
                max_occurs=1,
            ),
            LiteralInput(
                'normalize',
                'normalization',
                abstract='Normalize by subtraction of annual cycle',
                default='base',
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                allowed_values=['None', 'base', 'sim', 'own']),
            LiteralInput(
                'seasonwin',
                'Seasonal window',
                # Typo fixed in abstract: "befor" -> "before".
                abstract='Number of days before and after the date to be analysed',
                # Integer default to match data_type='integer' (was the string '30').
                default=30,
                data_type='integer',
                min_occurs=0,
                max_occurs=1,
            ),
            LiteralInput(
                'nanalog',
                'Nr of analogues',
                abstract='Number of analogues to be detected',
                default=20,
                data_type='integer',
                min_occurs=0,
                max_occurs=1,
            ),
            LiteralInput(
                'dist',
                'Distance',
                abstract='Distance function to define analogues',
                default='euclidean',
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                allowed_values=['euclidean', 'mahalanobis', 'cosine', 'of']),
            LiteralInput(
                'outformat',
                'output file format',
                abstract='Choose the format for the analogue output file',
                default='ascii',
                data_type='string',
                min_occurs=1,
                max_occurs=1,
                allowed_values=['ascii', 'netCDF4']),
            LiteralInput(
                'timewin',
                'Time window',
                abstract='Number of days following the analogue day the distance will be averaged',
                default=1,
                data_type='integer',
                min_occurs=0,
                max_occurs=1,
            ),
        ]

        outputs = [
            # Configuration handed to the Fortran analogue search.
            LiteralOutput(
                'config',
                'Config File',
                abstract='Config file used for the Fortran process',
                data_type='string',
            ),
            ComplexOutput(
                'analogs',
                'Analogues File',
                # Typo fixed in abstract: "mulit-column" -> "multi-column".
                abstract='multi-column text file',
                as_reference=True,
                supported_formats=[Format('text/plain')],
            ),
            ComplexOutput(
                'output_netcdf',
                'Subsets for one dataset',
                abstract='Prepared netCDF file as input for weatherregime calculation',
                as_reference=True,
                supported_formats=[Format('application/x-netcdf')]),
            ComplexOutput(
                'output_log',
                'Logging information',
                abstract='Collected logs during process run.',
                as_reference=True,
                supported_formats=[Format('text/plain')]),
        ]

        super(AnalogsmodelProcess, self).__init__(
            self._handler,
            identifier="analogs_model",
            title="Analogues of circulation (based on climate model data)",
            abstract='Search for days with analogue pressure pattern for reanalyses data sets',
            version="0.10",
            metadata=[
                Metadata('LSCE', 'http://www.lsce.ipsl.fr/en/index.php'),
                Metadata('Doc',
                         'http://flyingpigeon.readthedocs.io/en/latest/'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
    def __init__(self):
        """Declare the WPS inputs/outputs of the blocking diagnostic and
        register the process with pywps."""
        # Dataset selectors plus the two diagnostic-specific options.
        ref_model = LiteralInput('ref_model',
                                 'Reference Model',
                                 abstract='Choose a reference model like ERA-Interim.',
                                 data_type='string',
                                 allowed_values=['ERA-Interim'],
                                 default='ERA-Interim',
                                 min_occurs=1,
                                 max_occurs=1)
        season = LiteralInput('season',
                              'Season',
                              abstract='Choose a season like DJF.',
                              data_type='string',
                              allowed_values=['DJF', 'MAM', 'JJA', 'SON', 'ALL'],
                              default='DJF')
        inputs = [
            *model_experiment_ensemble(models=['EC-EARTH'],
                                       experiments=['historical'],
                                       ensembles=['r2i1p1'],
                                       start_end_year=(1850, 2005),
                                       start_end_defaults=(1980, 1989)),
            ref_model,
            season,
        ]

        # One plot output is generated for each of these blocking indices.
        self.plotlist = [
            'TM90', 'NumberEvents', 'DurationEvents', 'LongBlockEvents',
            'BlockEvents', 'ACN', 'CN', 'BI', 'MGI', 'Z500', 'ExtraBlock',
            'InstBlock'
        ]

        data_full = ComplexOutput(
            'data_full',
            'Full Blocking Data',
            abstract='Generated output data of ESMValTool processing.',
            as_reference=True,
            supported_formats=[FORMATS.NETCDF])
        data_clim = ComplexOutput(
            'data_clim',
            'Clim Blocking Data',
            abstract='Generated output data of ESMValTool processing.',
            as_reference=True,
            supported_formats=[FORMATS.NETCDF])
        archive = ComplexOutput(
            'archive',
            'Archive',
            abstract='The complete output of the ESMValTool processing as an zip archive.',
            as_reference=True,
            supported_formats=[Format('application/zip')])
        outputs = [
            *outputs_from_plot_names(self.plotlist),
            data_full,
            data_clim,
            archive,
            *default_outputs(),
        ]

        super(Blocking, self).__init__(
            self._handler,
            identifier="blocking",
            title="Blocking metrics and indices",
            version=runner.VERSION,
            abstract="Calculate Blocking metrics that shows the mid-latitude 1D and 2D blocking indices.",
            metadata=[
                Metadata('Estimated Calculation Time', '2 minutes'),
                Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
                Metadata(
                    'Documentation',
                    'https://esmvaltool.readthedocs.io/en/latest/recipes/recipe_miles.html',
                    role=util.WPS_ROLE_DOC),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
    def __init__(self):
        """Set up the WPS interface for the heatwave/coldwave duration metric.

        Declares the model-selection inputs, the historical and projection
        year ranges, the threshold options, and the plot/data outputs, then
        registers everything with the pywps ``Process`` base class.
        """
        # Variable / frequency the selected model datasets must provide.
        self.variables = ['tasmin']
        self.frequency = 'day'

        inputs = [
            # Model / experiment / ensemble selector (single dataset only).
            *model_experiment_ensemble(model='bcc-csm1-1',
                                       experiment='rcp85',
                                       ensemble='r1i1p1',
                                       max_occurs=1,
                                       required_variables=self.variables,
                                       required_frequency=self.frequency),
            # Historical reference period used to derive the threshold.
            *year_ranges((1971, 2000),
                         start_name='start_historical',
                         end_name='end_historical'),
            # Projection period in which events are evaluated.
            *year_ranges((2060, 2080),
                         start_name='start_projection',
                         end_name='end_projection'),
            LiteralInput(
                'quantile',
                'Quantile',
                abstract=
                'Quantile defining the exceedance/non-exceedance threshold.',
                data_type='float',
                # Quantile is restricted to the range [0.0, 1.0].
                allowed_values=AllowedValue(
                    allowed_type=ALLOWEDVALUETYPE.RANGE,
                    minval=0.0,
                    maxval=1.0),
                default=0.8),
            LiteralInput(
                'min_duration',
                'Minimum duration',
                abstract=
                'Minimum duration in days of a heatwave/coldwave event.',
                data_type='integer',
                # Duration in days, bounded by a full (leap) year.
                allowed_values=AllowedValue(
                    allowed_type=ALLOWEDVALUETYPE.RANGE, minval=1, maxval=366),
                default=5),
            LiteralInput(
                'operator',
                'Operator',
                abstract='Exceedance/non-exceedance of historic threshold.',
                data_type='string',
                allowed_values=['exceedances', 'non-exceedances'],
                default='non-exceedances'),
            LiteralInput('season',
                         'Season',
                         abstract='Choose a season.',
                         data_type='string',
                         allowed_values=['summer', 'winter'],
                         default='winter'),
        ]
        outputs = [
            ComplexOutput(
                'plot',
                'Extreme spell duration tasmin plot',
                abstract='Generated extreme spell duration tasmin plot.',
                as_reference=True,
                supported_formats=[Format('image/png')]),
            ComplexOutput('data',
                          'Extreme spell duration tasmin data',
                          abstract='Extreme spell duration tasmin data.',
                          as_reference=True,
                          supported_formats=[Format('application/zip')]),
            # Full ESMValTool working directory, zipped.
            ComplexOutput(
                'archive',
                'Archive',
                abstract=
                'The complete output of the ESMValTool processing as an zip archive.',
                as_reference=True,
                supported_formats=[Format('application/zip')]),
            *default_outputs(),
        ]

        super(HeatwavesColdwaves, self).__init__(
            self._handler,
            identifier="heatwaves_coldwaves",
            title="Heatwave and coldwave duration",
            version=runner.VERSION,
            abstract=
            """Metric showing the duration of heatwaves and coldwaves, to help understand potential changes in
                        energy demand.""",
            metadata=[
                Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
                Metadata(
                    'Documentation',
                    'https://esmvaltool.readthedocs.io/en/version2_development/recipes/recipe_heatwaves_coldwaves.html',
                    role=util.WPS_ROLE_DOC),
                Metadata('Media',
                         util.diagdata_url() +
                         '/heatwaves_coldwaves/extreme_spells_energy.png',
                         role=util.WPS_ROLE_MEDIA),
                Metadata('Estimated Calculation Time', '4 minutes'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
    def __init__(self):
        """Set up the WPS interface for the North-Atlantic weather regimes
        diagnostic and register it with pywps."""
        # Variable / frequency the selected model datasets must provide.
        self.variables = ['zg']
        self.frequency = 'day'
        inputs = [
            # Model / experiment / ensemble selector (single dataset only).
            *model_experiment_ensemble(model='ACCESS1-0',
                                       experiment='historical',
                                       ensemble='r1i1p1',
                                       max_occurs=1,
                                       required_variables=self.variables,
                                       required_frequency=self.frequency),
            # Analysis period; selectable years limited to 1979-2018.
            *year_ranges((1980, 1989), start_year=1979, end_year=2018),
            LiteralInput('ref_model',
                         'Reference Model',
                         abstract='Choose a reference model like ERA-Interim.',
                         data_type='string',
                         allowed_values=['ERA-Interim'],
                         default='ERA-Interim',
                         min_occurs=1,
                         max_occurs=1),
            # Removed on request of Jost
            # LiteralInput('season', 'Season',
            #              abstract='Choose a season like DJF.',
            #              data_type='string',
            #              allowed_values=['DJF'],
            #              default='DJF'),
            # LiteralInput('nclusters', 'nclusters',
            #              abstract='Choose a number of clusters.',
            #              data_type='string',
            #              allowed_values=['4'],
            #              default='4'),
        ]
        # One PNG plot output per detected regime: Regime1..Regime4.
        self.plotlist = [("Regime{}".format(i), [Format('image/png')])
                         for i in range(1, 5)]
        outputs = [
            *outputs_from_plot_names(self.plotlist),
            ComplexOutput(
                'data',
                'Regime Data',
                abstract='Generated output data of ESMValTool processing.',
                as_reference=True,
                supported_formats=[FORMATS.NETCDF]),
            # Full ESMValTool working directory, zipped.
            ComplexOutput(
                'archive',
                'Archive',
                abstract=
                'The complete output of the ESMValTool processing as an zip archive.',
                as_reference=True,
                supported_formats=[Format('application/zip')]),
            *default_outputs(),
        ]

        super(WeatherRegimes, self).__init__(
            self._handler,
            identifier="weather_regimes",
            title="Weather regimes",
            version=runner.VERSION,
            abstract="""Diagnostic providing North-Atlantic Weather Regimes
                        The estimated calculation time of this process is 2 minutes for the default values supplied.
                        """,
            metadata=[
                Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
                Metadata(
                    'Documentation',
                    'https://esmvaltool.readthedocs.io/en/v2.0a2/recipes/recipe_miles.html',
                    role=util.WPS_ROLE_DOC),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
Esempio n. 15
0
    def __init__(self):
        """Set up the WPS interface for the synthetic-ensemble ("toy model")
        diagnostic and register it with pywps.

        Declares the dataset selection, the region/variable options and the
        model-generation parameters (beta, number of members).
        """
        # More correctly, the required variable depends on the settings:
        # the 'variable' input below selects which one is simulated.
        self.variables = ['psl', 'tas']
        self.frequency = 'mon'
        inputs = [
            # Model / experiment / ensemble selector (single dataset only).
            *model_experiment_ensemble(model='ACCESS1-0',
                                       experiment='historical',
                                       ensemble='r1i1p1',
                                       max_occurs=1,
                                       required_variables=self.variables,
                                       required_frequency=self.frequency),
            *year_ranges((1999, 2001)),
            LiteralInput('variable',
                         'Variable',
                         abstract='Select the variable to simulate.',
                         data_type='string',
                         default='psl',
                         allowed_values=['psl', 'tas']),
            # Region of interest, in whole degrees.
            LiteralInput(
                'start_longitude',
                'Start longitude',
                abstract='Minimum longitude.',
                data_type='integer',
                default=-40,
            ),
            LiteralInput(
                'end_longitude',
                'End longitude',
                abstract='Maximum longitude.',
                data_type='integer',
                default=40,
            ),
            LiteralInput(
                'start_latitude',
                'Start latitude',
                abstract='Minimum latitude.',
                data_type='integer',
                default=30,
            ),
            LiteralInput(
                'end_latitude',
                'End latitude',
                abstract='Maximum latitude.',
                data_type='integer',
                default=50,
            ),
            LiteralInput(
                'beta',
                'Beta',
                abstract='User defined underdispersion (beta >= 0).',
                data_type='float',
                default=0.7,
            ),
            LiteralInput(
                'number_of_members',
                'Number of members',
                abstract='Number of members to be generated.',
                data_type='integer',
                default=2,
            ),
        ]
        outputs = [
            ComplexOutput('plot',
                          'Toy Model plot',
                          # Typo fixed in abstract: "plt" -> "plot".
                          abstract='Generated synthetic model plot.',
                          as_reference=True,
                          supported_formats=[Format('image/jpeg')]),
            ComplexOutput('model',
                          'Toy Model',
                          abstract='Generated synthetic model.',
                          as_reference=True,
                          supported_formats=[FORMATS.NETCDF]),
            ComplexOutput(
                'archive',
                'Archive',
                # Grammar fixed in abstract: "as an zip" -> "as a zip".
                abstract='The complete output of the ESMValTool processing as a zip archive.',
                as_reference=True,
                supported_formats=[Format('application/zip')]),
            *default_outputs(),
        ]

        super(Toymodel, self).__init__(
            self._handler,
            identifier="toymodel",
            title="Toymodel",
            version=runner.VERSION,
            # Abstract normalized: the old triple-quoted string embedded raw
            # source indentation inside the user-facing text.
            abstract=(
                "The goal of this diagnostic is to simulate single-model "
                "ensembles from an observational dataset to investigate the "
                "effect of observational uncertainty."),
            metadata=[
                Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
                Metadata(
                    'Documentation',
                    'https://esmvaltool.readthedocs.io/en/version2_development/recipes/recipe_toymodel.html',
                    role=util.WPS_ROLE_DOC,
                ),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
Esempio n. 16
0
    REFREEZE_FACTOR=2.6851,
    REFREEZE_EXP=0.3740,
    PET_CORRECTION=1.0000,
    HMETS_RUNOFF_COEFF=0.4739,
    PERC_COEFF=0.0114,
    BASEFLOW_COEFF_1=0.0243,
    BASEFLOW_COEFF_2=0.0069,
    TOPSOIL=310.7211,
    PHREATIC=916.1947,
)

# WPS input describing the HMETS model parameter vector as a comma-separated
# string. The abstract lists the parameter names taken from the defaults
# dataclass, and the default value is the comma-separated rendering of the
# default parameter values themselves.
params = LiteralInput(
    "params",
    "Comma separated list of model parameters",
    abstract="Parameters: " + ", ".join(f.name
                                        for f in fields(params_defaults)),
    data_type="string",
    default=", ".join(map(str, astuple(params_defaults))),
    min_occurs=0,
    # Occurrence cap tied to the server's configured parallel-process limit.
    max_occurs=config.max_parallel_processes,
)


class RavenHMETSProcess(RavenProcess):
    identifier = "raven-hmets"
    abstract = "HMETS hydrological model"
    title = ""
    version = ""
    model_cls = HMETS
    tuple_inputs = {"params": HMETS.Params}

    inputs = [
Esempio n. 17
0
    def __init__(self):
        """Declare the WPS inputs/outputs of the NAME concentration-plotting
        process and register it with pywps."""
        # Which NAME run to plot, and how to aggregate it in time.
        run_id = LiteralInput(
            'filelocation',
            'NAME run ID',
            data_type='string',
            abstract="Run ID that identifies the NAME output files")
        summarise = LiteralInput(
            'summarise',
            'Summarise data',
            data_type='string',
            abstract='Plot summaries of each day/week/month',
            allowed_values=['NA', 'day', 'week', 'month', 'all'],
            default='NA')
        timestamp = LiteralInput(
            'timestamp',
            'Plot specific date and time',
            data_type='dateTime',
            abstract='Plot only a specific timestamp. Excludes the creation '
                     'of summary plots. Format: YYYY-MM-DD HH:MM',
            min_occurs=0)
        # Optional appearance / extent controls.
        station = LiteralInput(
            'station',
            'Mark release location',
            data_type='boolean',
            abstract='Mark the location of release onto the image',
            min_occurs=0)
        projection = LiteralInput(
            'projection',
            'Projection',
            data_type='string',
            abstract='Map projection',
            allowed_values=['cyl', 'npstere', 'spstere'],
            min_occurs=0)
        lon_bounds = LiteralInput(
            'lon_bounds',
            'Longitudinal boundary',
            data_type='string',
            abstract='X-axis: Min and Max longitude to plot. Format: Min,Max',
            min_occurs=0)
        lat_bounds = LiteralInput(
            'lat_bounds',
            'Latitudinal boundary',
            data_type='string',
            abstract='Y-axis: Min and Max latitude boundary. Format: Min,Max',
            min_occurs=0)
        scale = LiteralInput(
            'scale',
            'Particle concentration scale',
            data_type='string',
            abstract='Particle concentration scale. If no value is set, it will autoscale. '
                     'Format: Min,Max',
            min_occurs=0)
        colormap = LiteralInput(
            'colormap',
            'Colour map',
            data_type='string',
            abstract='Matplotlib color map name',
            default='coolwarm',
            min_occurs=0,
            allowed_values=['coolwarm', 'viridis', 'rainbow'])

        inputs = [
            run_id, summarise, timestamp, station, projection,
            lon_bounds, lat_bounds, scale, colormap,
        ]
        outputs = [
            ComplexOutput('FileContents',
                          'Plot file(s)',
                          abstract="Plot files",
                          supported_formats=[
                              Format('application/x-zipped-shp'),
                              Format('text/plain'),
                              Format('image/png'), FORMATS.GEOTIFF
                          ],
                          as_reference=True),
        ]

        super(PlotAll, self).__init__(
            self._handler,
            identifier='plotall',
            title='Plot NAME results - Concentration',
            abstract="PNG plots are generated from the NAME output files",
            version='0.1',
            metadata=[
                Metadata(
                    'NAME-on-JASMIN guide',
                    'http://jasmin.ac.uk/jasmin-users/stories/processing/'),
                Metadata('Colour maps',
                         'https://matplotlib.org/users/colormaps.html'),
            ],
            inputs=inputs,
            outputs=outputs,
            store_supported=True,
            status_supported=True)
Esempio n. 18
0
    def __init__(self):
        """Set up the WPS interface for the climate fact sheet generator and
        register it with pywps."""
        inputs = [
            # One or more netCDF resources, optionally bundled in a tar/zip.
            ComplexInput(
                'resource',
                'Resource',
                abstract=
                'NetCDF Files or archive (tar/zip) containing NetCDF files.',
                metadata=[Metadata('Info')],
                min_occurs=1,
                max_occurs=1000,
                supported_formats=[
                    Format('application/x-netcdf'),
                    Format('application/x-tar'),
                    Format('application/zip'),
                ]),
            # Country selection; one occurrence allowed per selectable country.
            LiteralInput(
                "region",
                "Region",
                # abstract= countries_longname(), # need to handle special non-ascii char in countries.
                data_type='string',
                min_occurs=0,
                max_occurs=len(countries()),
                allowed_values=countries()),  # REGION_EUROPE #COUNTRIES
        ]

        ###########
        # OUTPUTS
        ###########
        outputs = [
            ComplexOutput(
                'output_nc',
                "Subsets",
                abstract="Tar archive containing the netCDF files",
                as_reference=True,
                supported_formats=[Format("application/x-tar")],
            ),
            ComplexOutput(
                'output_factsheet',
                "Climate Fact Sheet",
                abstract=
                "Short overview of the climatological situation of the selected countries",
                as_reference=True,
                supported_formats=[Format('application/pdf')],
            ),
            ComplexOutput('output_log',
                          'Logging information',
                          abstract="Collected logs during process run.",
                          as_reference=True,
                          supported_formats=[Format("text/plain")]),
        ]

        super(FactsheetProcess, self).__init__(
            self._handler,
            identifier="climatefactsheet",
            title="Climate Fact Sheet Generator",
            version="0.3",
            abstract=
            "Returns a pdf with a short overview of the climatological situation for the selected countries",
            metadata=[
                # {"title": "LSCE", "href": "http://www.lsce.ipsl.fr/en/index.php"},
                {
                    "title": "Doc",
                    "href": "http://flyingpigeon.readthedocs.io/en/latest/"
                },
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
Esempio n. 19
0
def convert_xclim_inputs_to_pywps(params: Dict, parent=None, parse_percentiles: bool = False) -> List[PywpsInput]:
    """Convert xclim indicators properties to pywps inputs.

    If parse_percentiles is True, percentile variables (*_per) are dropped and replaced by
    a "percentile" input (a float) with a default taken from constants.
    """
    if parse_percentiles and parent is None:
        raise ValueError('The indicator identifier must be passed through `parent` if `parse_percentiles=True`.')

    # Mapping from xclim's InputKind to pywps data_type (generic kinds only).
    kind_to_dtype = {
        InputKind.BOOL: "boolean",
        InputKind.QUANTITY_STR: "string",
        InputKind.NUMBER: "integer",
        InputKind.NUMBER_SEQUENCE: "integer",
        InputKind.STRING: "string",
        InputKind.DAY_OF_YEAR: "string",
        InputKind.DATE: "datetime",
    }
    variable_kinds = (InputKind.VARIABLE, InputKind.OPTIONAL_VARIABLE)

    # Ideally this would be based on the Parameters docstring section rather than name conventions.
    wps_inputs: List[PywpsInput] = []
    for name, meta in params.items():
        kind = meta['kind']
        if parse_percentiles and name.endswith('_per') and kind in variable_kinds:
            # Replace the percentile variable with a plain percentile number input.
            var_name = name.split('_')[0]
            wps_inputs.append(
                LiteralInput(
                    f"perc_{var_name}",
                    title=f"{var_name} percentile",
                    abstract=f"Which percentile to compute and use as threshold for variable {var_name}.",
                    data_type="integer",
                    min_occurs=0,
                    max_occurs=1,
                    default=default_percentiles[parent][name],
                )
            )
        elif kind in variable_kinds:
            wps_inputs.append(make_nc_input(name))
        elif name == "freq":
            wps_inputs.append(make_freq(name, default=meta['default'], abstract=meta['description']))
        elif name == "indexer":
            wps_inputs.extend((make_month(), make_season()))
        elif kind in kind_to_dtype:
            choices = list(meta['choices']) if 'choices' in meta else None
            default = meta['default'] if meta['default'] != empty_default else None
            wps_inputs.append(
                LiteralInput(
                    name,
                    title=name.capitalize().replace('_', ' '),
                    abstract=meta['description'],
                    data_type=kind_to_dtype[kind],
                    min_occurs=0,
                    max_occurs=99 if kind == InputKind.NUMBER_SEQUENCE else 1,
                    default=default,
                    allowed_values=choices,
                )
            )
        elif kind < 50:
            # Unsupported generic kind: warn instead of failing hard.
            # (Kinds >= 50 fall through without any warning.)
            LOGGER.warning(f"{parent}: Argument {name} of kind {kind} is not implemented.")

    return wps_inputs
Esempio n. 20
0
    def __init__(self):
        """Declare the WPS interface of the CMIP6 CMOR compliance checker."""
        variable = LiteralInput(
            'variable',
            'Variable',
            data_type='string',
            abstract="Specify geophysical variable name. (Optional)",
            min_occurs=0,
            max_occurs=1)
        dataset = ComplexInput(
            'dataset',
            'Dataset',
            abstract='You may provide a URL or upload a NetCDF file.',
            metadata=[Metadata('Info')],
            min_occurs=1,
            max_occurs=1024,
            supported_formats=[Format('application/x-netcdf')])

        summary = ComplexOutput(
            'output',
            'Summary Report',
            abstract="Summary report of check results.",
            as_reference=True,
            supported_formats=[Format('text/plain')])
        report = ComplexOutput(
            'report',
            'Check Report',
            abstract="Report of check result.",
            as_reference=True,
            supported_formats=[Format('text/plain')])
        report_tar = ComplexOutput(
            'report_tar',
            'Reports as tarfile',
            abstract="Report of check result for each file as tarfile.",
            as_reference=True,
            supported_formats=[Format('application/x-tar')])

        super(CMORChecker, self).__init__(
            self._handler,
            identifier="cmor_checker",
            title="CMIP6 CMOR Checker",
            version="3.2.8",
            abstract=(
                'Calls the CMIP6 cmor checker to verify CMIP6 compliance.'
                'CMIP6 CMOR checker will verify that all attributes in the input file are present'
                ' and conform to CMIP6 for publication into ESGF.'),
            metadata=[
                Metadata('CMOR Checker on GitHub',
                         'https://github.com/PCMDI/cmor'),
                Metadata('User Guide',
                         'https://cmor.llnl.gov/mydoc_cmip6_validator/'),
            ],
            inputs=[variable, dataset],
            outputs=[summary, report, report_tar],
            status_supported=True,
            store_supported=True,
        )
    def __init__(self):
        """Declare the WPS interface of the modes-of-variability diagnostic.

        Builds the historical/projection model selectors, the clustering
        options and the ESMValTool result outputs, then registers the
        process with pywps.
        """
        inputs = [
            *model_experiment_ensemble(models=['bcc-csm1-1'],
                                       model_name='Model_historical',
                                       experiments=['historical'],
                                       experiment_name='Experiment_historical',
                                       ensembles=['r1i1p1'],
                                       ensemble_name='Ensemble_historical'),
            *year_ranges((1850, 2005), (1971, 2000),
                         start_name='start_historical',
                         end_name='end_historical'),
            *model_experiment_ensemble(models=['bcc-csm1-1'],
                                       model_name='Model_projection',
                                       experiments=['rcp85'],
                                       experiment_name='Experiment_projection',
                                       ensembles=['r1i1p1'],
                                       ensemble_name='Ensemble_projection'),
            *year_ranges((2006, 2050), (2020, 2050),
                         start_name='start_projection',
                         end_name='end_projection'),
            LiteralInput('region',
                         'Region',
                         abstract='Choose a region like Polar',
                         data_type='string',
                         allowed_values=['Polar', 'North-Atlantic'],
                         default='North-Atlantic'),
            LiteralInput('ncenters',
                         'Cluster Centers',
                         abstract='Choose a number of cluster centers.',
                         data_type='string',
                         allowed_values=['3', '4', '5'],
                         default='4'),
            LiteralInput('detrend_order',
                         'Detrend Order',
                         abstract='Choose a order of detrend.',
                         data_type='string',
                         allowed_values=['2', '1'],
                         default='2'),
            LiteralInput('cluster_method',
                         'Cluster Method',
                         abstract='Choose a clustering method.',
                         data_type='string',
                         allowed_values=['kmeans', 'hclust'],
                         default='kmeans'),
            LiteralInput('eofs',
                         'EOFs',
                         abstract='Calculate EOFs?',
                         data_type='string',
                         allowed_values=['True', 'False'],
                         # Fixed: default was the boolean True, which is not one
                         # of the allowed string values for this string input.
                         default='True'),
            LiteralInput(
                'frequency',
                'Frequency',
                abstract='Choose a frequency like JAN.',
                data_type='string',
                allowed_values=[
                    'JAN',
                    'FEB',
                    'MAR',
                    'APR',
                    'MAY',
                    'JUN',
                    'JUL',
                    'AUG',
                    'SEP',
                    'OCT',
                    'NOV',
                    'DEC',
                    'JJA',
                    'SON',
                    'DJF'  # 'MAM' <- does not work yet
                ],
                default='JJA'),
        ]
        # Plot identifiers for which outputs_from_plot_names() declares outputs.
        self.plotlist = [
            'Table_psl', 'psl_predicted_regimes', 'psl_observed_regimes'
        ]
        outputs = [
            *outputs_from_plot_names(self.plotlist),
            ComplexOutput(
                'rmse',
                'RMSE Data',
                abstract='Generated output data of ESMValTool processing.',
                as_reference=True,
                supported_formats=[FORMATS.NETCDF]),
            ComplexOutput(
                'exp',
                'EXP Data',
                abstract='Generated output data of ESMValTool processing.',
                as_reference=True,
                supported_formats=[FORMATS.NETCDF]),
            ComplexOutput(
                'obs',
                'OBS Data',
                abstract='Generated output data of ESMValTool processing.',
                as_reference=True,
                supported_formats=[FORMATS.NETCDF]),
            ComplexOutput(
                'archive',
                'Archive',
                # Fixed typo: "as an zip archive" -> "as a zip archive".
                abstract=
                'The complete output of the ESMValTool processing as a zip archive.',
                as_reference=True,
                supported_formats=[Format('application/zip')]),
            *default_outputs(),
        ]

        super(ModesVariability, self).__init__(
            self._handler,
            identifier="modes_of_variability",
            title="Modes of variability",
            version=runner.VERSION,
            # Fixed duplicated word: "relative relative bias" -> "relative bias".
            abstract="""Diagnostics showing the RMSE between the observed and
            modelled patterns of variability obtained through classification
            and their relative bias (percentage) in the frequency of
            occurrence and the persistence of each mode.""",
            metadata=[
                Metadata('Estimated Calculation Time', '30 seconds'),
                Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
                Metadata(
                    'Documentation',
                    'https://esmvaltool.readthedocs.io/en/latest/recipes/recipe_modes_of_variability.html',
                    role=util.WPS_ROLE_DOC),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
Esempio n. 22
0
    def __init__(self):
        """Declare the WPS interface of the drought indicator diagnostic."""
        self.variables = ['pr', 'tas']
        self.frequency = 'mon'

        inputs = [
            *model_experiment_ensemble(model='ACCESS1-0',
                                       experiment='historical',
                                       ensemble='r1i1p1',
                                       max_occurs=1,
                                       required_variables=self.variables,
                                       required_frequency=self.frequency),
            *year_ranges((1990, 1999)),
            LiteralInput('ref_dataset',
                         'Reference Dataset',
                         abstract='Choose a reference dataset like ERA-Interim.',
                         data_type='string',
                         allowed_values=['ERA-Interim'],
                         default='ERA-Interim',
                         min_occurs=1,
                         max_occurs=1),
        ]
        self.plotlist = []

        # The SPI/SPEI outputs all share the same shape; build them from specs.
        png_specs = (
            ('spi_plot', 'SPI Histogram plot', 'Generated spi histogram plot.'),
            ('spei_plot', 'SPEI Histogram plot', 'Generated SPEI Histogram plot.'),
        )
        nc_specs = (
            ('spi_model', 'SPI Data for the model',
             'The complete SPI Data for the model.'),
            ('spi_reference', 'SPI Data for the reference model',
             'The complete SPI Data for the reference model.'),
            ('spei_model', 'SPEI Data for the model',
             'The complete SPEI Data for the model.'),
            ('spei_reference', 'SPEI Data for the reference model',
             'The complete SPEI Data for the reference model.'),
        )
        outputs = [
            *(ComplexOutput(ident, title,
                            abstract=text,
                            as_reference=True,
                            supported_formats=[Format('image/png')])
              for ident, title, text in png_specs),
            *(ComplexOutput(ident, title,
                            abstract=text,
                            as_reference=True,
                            supported_formats=[FORMATS.NETCDF])
              for ident, title, text in nc_specs),
            ComplexOutput('archive',
                          'Archive',
                          abstract='The complete output of the ESMValTool processing as an zip archive.',
                          as_reference=True,
                          supported_formats=[Format('application/zip')]),
            *default_outputs(),
        ]

        super(DroughtIndicator, self).__init__(
            self._handler,
            identifier="drought_indicator",
            title="Drought indicator",
            version=runner.VERSION,
            abstract="""The drought indicator calculates diagnostics for meteorological drought.""",
            metadata=[
                Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
                Metadata('Documentation',
                         'https://esmvaltool.readthedocs.io/en/version2_development/recipes/recipe_spei.html',
                         role=util.WPS_ROLE_DOC),
                Metadata('Estimated Calculation Time', '45 minutes'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
    def __init__(self):
        """Declare the WPS interface of the spatial-analog map process."""
        resource = ComplexInput(
            'resource',
            'netCDF dataset',
            abstract="Dissimilarity between target at selected "
                     "location and candidate distributions over the entire grid.",
            metadata=[Metadata('Info')],
            min_occurs=1,
            max_occurs=1,
            supported_formats=[
                Format('application/x-netcdf'),
                Format('application/x-tar'),
                Format('application/zip'),
            ])
        fig_fmt = LiteralInput(
            'fmt',
            'format',
            abstract="Figure format",
            data_type='string',
            min_occurs=0,
            max_occurs=5,
            default='png',
            allowed_values=['png', 'pdf', 'svg', 'ps', 'eps'])
        fig_title = LiteralInput(
            'title',
            'Title',
            abstract="Figure title.",
            data_type='string',
            min_occurs=0,
            max_occurs=1,
            default='')

        figure_out = ComplexOutput(
            'output_figure',
            'Dissimilarity map',
            abstract="Map of dissimilarity values.",
            as_reference=True,
            supported_formats=[
                Format('image/png'),
                Format('application/pdf'),
                Format('image/svg+xml'),
                Format('application/postscript'),
            ])
        log_out = ComplexOutput(
            'output_log',
            'Logging information',
            abstract="Collected logs during process run.",
            as_reference=True,
            supported_formats=[Format('text/plain')])

        super(MapSpatialAnalogProcess, self).__init__(
            self._handler,
            identifier="map_spatial_analog",
            title="Map of dissimilarity values calculated by the spatial_analog process.",
            abstract="Produce map showing the dissimilarity values computed by the spatial_analog process as well as indicating by a marker the location of the target site.",
            version="0.1",
            metadata=[
                Metadata('Doc',
                         'http://flyingpigeon.readthedocs.io/en/latest/'),
            ],
            inputs=[resource, fig_fmt, fig_title],
            outputs=[figure_out, log_out],
            status_supported=True,
            store_supported=True,
        )
Esempio n. 24
0
# Minimal standalone WPS service exposing a single "file_checker" process,
# served with werkzeug's development server.
from werkzeug.serving import run_simple
import wps.wps as handler
from pywps import Service, Process, LiteralInput, ComplexInput, Format, LiteralOutput
from pywps.inout import BBoxOutput

# NOTE(review): in current pywps, the second positional argument of
# LiteralInput/ComplexInput is the human-readable title (not the data type),
# and ComplexInput takes supported_formats as a keyword argument. This call
# style looks like an older pywps API — confirm against the installed version.
process = Process(handler.handler,
                  identifier='file_checker',
                  inputs=[
                      LiteralInput('type', 'string'),
                      LiteralInput('options', 'string'),
                      ComplexInput('file', [Format('text/UTF-8')]),
                  ],
                  outputs=[
                      LiteralOutput('msg', 'string'),
                  ])

#outputs=[ LiteralInput('success', 'boolean'),
#                             BBoxOutput()])

service = Service([process])

# Development server only: binds localhost:5000 and restarts on code changes.
run_simple('localhost', 5000, service, use_reloader=True)

# test url on my mac
# http://localhost:5000/ows?service=WPS&version=1.0.0&request=Execute&identifier=file_checker&DataInputs=type=roms;options=2D;file=file:///Users/uwer/projects/models/roms/test/marvl/41/roms_grid.nc
Esempio n. 25
0
    DD_REFREEZE_TEMP=2.0,
    REFREEZE_FACTOR=5.0,
    REFREEZE_EXP=1.0,
    PET_CORRECTION=3.0,
    HMETS_RUNOFF_COEFF=1.0,
    PERC_COEFF=0.02,
    BASEFLOW_COEFF_1=0.1,
    BASEFLOW_COEFF_2=0.01,
    TOPSOIL=0.5,
    PHREATIC=2.0,
)

# Optimisation bound inputs: each is a comma separated string whose default
# is derived from the corresponding parameter-defaults namedtuple.
upperBounds = LiteralInput(
    "upperBounds",
    "Comma separated list of model parameters Upper Bounds",
    abstract=f"UParameters: {', '.join(Uparams_defaults._fields)}",
    data_type="string",
    default=", ".join(map(str, Uparams_defaults)),
    min_occurs=0,
)

lowerBounds = LiteralInput(
    "lowerBounds",
    "Comma separated list of model parameters Lower Bounds",
    abstract=f"LParameters: {', '.join(Lparams_defaults._fields)}",
    data_type="string",
    default=", ".join(map(str, Lparams_defaults)),
    min_occurs=0,
)


class OstrichHMETSProcess(OstrichProcess):
Esempio n. 26
0
    def __init__(self):
        """Declare one input and one output of every supported WPS parameter type.

        This process exercises literal, complex and bounding-box parameters;
        the handler presumably copies inputs through to the matching outputs
        (not visible here — confirm against `_handler`).
        """
        # Literal inputs covering every pywps literal data_type, plus
        # AllowedValue ranges, AnyValue and ValuesReference variants.
        inputs = [
            LiteralInput('string',
                         'String',
                         data_type='string',
                         abstract='Enter a simple string.',
                         default="This is just a string",
                         mode=MODE.SIMPLE),
            LiteralInput(
                'int',
                'Integer',
                data_type='integer',
                abstract='Choose an integer number from allowed values.',
                default="7",
                allowed_values=[1, 2, 3, 5, 7, 11]),
            LiteralInput('float',
                         'Float',
                         data_type='float',
                         abstract='Enter a float number.',
                         default="3.14",
                         min_occurs=0,
                         max_occurs=5),
            # TODO: boolean default is not displayed in phoenix
            LiteralInput('boolean',
                         'Boolean',
                         data_type='boolean',
                         abstract='Make your choice :)',
                         default='1'),
            LiteralInput('angle',
                         'Angle',
                         data_type='angle',
                         abstract='Enter an angle [0, 360] :)',
                         default='90'),
            LiteralInput('time',
                         'Time',
                         data_type='time',
                         abstract='Enter a time like 12:00:00',
                         default='12:00:00'),
            LiteralInput('date',
                         'Date',
                         data_type='date',
                         abstract='Enter a date like 2012-05-01',
                         default='2012-05-01'),
            LiteralInput('datetime',
                         'Datetime',
                         data_type='dateTime',
                         abstract='Enter a datetime like 2016-09-02T12:00:00Z',
                         default='2016-09-02T12:00:00Z'),
            LiteralInput('string_choice',
                         'String Choice',
                         data_type='string',
                         abstract='Choose one item form list.',
                         allowed_values=['rock', 'paper', 'scissor'],
                         default='scissor'),
            LiteralInput('string_multiple_choice',
                         'String Multiple Choice',
                         abstract='Choose one or two items from list.',
                         data_type='string',
                         allowed_values=[
                             'sitting duck', 'flying goose', 'happy pinguin',
                             'gentle albatros'
                         ],
                         min_occurs=0,
                         max_occurs=2,
                         default='gentle albatros'),
            # Demonstrates AllowedValue ranges: two disjoint integer ranges.
            LiteralInput(
                'int_range',
                'Integer Range',
                abstract=
                'Choose number from range: 1-10 (step 1), 100-200 (step 10)',
                metadata=[
                    Metadata(
                        'PyWPS Docs',
                        'https://pywps.readthedocs.io/en/master/api.html#pywps.inout.literaltypes.AllowedValue'
                    ),  # noqa
                    Metadata(
                        'AllowedValue Example',
                        'http://docs.opengeospatial.org/is/14-065/14-065.html#98'
                    ),  # noqa
                ],
                data_type='integer',
                default='1',
                allowed_values=[
                    AllowedValue(minval=1, maxval=10),
                    AllowedValue(minval=100, maxval=200, spacing=10)
                ],
                mode=MODE.SIMPLE,
            ),
            LiteralInput(
                'any_value',
                'Any Value',
                abstract='Enter any value.',
                metadata=[
                    Metadata(
                        'PyWPS Docs',
                        'https://pywps.readthedocs.io/en/master/api.html#pywps.inout.literaltypes.AnyValue'
                    ),  # noqa
                ],
                allowed_values=AnyValue(),
                default='any value',
                mode=MODE.SIMPLE,
            ),
            # Allowed values are fetched at runtime from the referenced URL.
            LiteralInput(
                'ref_value',
                'Referenced Value',
                abstract='Choose a referenced value',
                metadata=[
                    Metadata(
                        'PyWPS Docs',
                        'https://pywps.readthedocs.io/en/master/_modules/pywps/inout/literaltypes.html'
                    ),  # noqa
                ],
                data_type='string',
                allowed_values=ValuesReference(
                    reference=
                    "https://en.wikipedia.org/w/api.php?action=opensearch&search=scotland&format=json"
                ),  # noqa
                default='Scotland',
                mode=MODE.SIMPLE,
            ),
            # TODO: bbox is not supported yet by owslib
            # BoundingBoxInput('bbox', 'Bounding Box',
            #                  abstract='Bounding Box with EPSG:4326 and EPSG:3035.',
            #                  crss=['epsg:4326', 'epsg:3035'],
            #                  min_occurs=0),
            ComplexInput(
                'text',
                'Text',
                abstract='Enter a URL pointing to a text document (optional)',
                metadata=[Metadata('Info')],
                min_occurs=0,
                supported_formats=[Format('text/plain')]),
            ComplexInput(
                'dataset',
                'Dataset',
                abstract="Enter a URL pointing to a NetCDF file (optional)",
                metadata=[
                    Metadata(
                        'NetCDF Format',
                        'https://en.wikipedia.org/wiki/NetCDF',
                        role=
                        'http://www.opengis.net/spec/wps/2.0/def/process/description/documentation'
                    )
                ],
                min_occurs=0,
                supported_formats=[FORMATS.NETCDF]),
        ]
        # One output mirroring each input, plus a bounding-box output.
        outputs = [
            LiteralOutput('string', 'String', data_type='string'),
            LiteralOutput('int', 'Integer', data_type='integer'),
            LiteralOutput('float', 'Float', data_type='float'),
            LiteralOutput('boolean', 'Boolean', data_type='boolean'),
            LiteralOutput('angle', 'Angle', data_type='angle'),
            LiteralOutput('time', 'Time', data_type='time'),
            LiteralOutput('date', 'Date', data_type='date'),
            LiteralOutput('datetime', 'DateTime', data_type='dateTime'),
            LiteralOutput('string_choice', 'String Choice',
                          data_type='string'),
            LiteralOutput('string_multiple_choice',
                          'String Multiple Choice',
                          data_type='string'),
            LiteralOutput('int_range', 'Integer Range', data_type='integer'),
            LiteralOutput('any_value', 'Any Value', data_type='string'),
            LiteralOutput('ref_value', 'Referenced Value', data_type='string'),
            ComplexOutput('text',
                          'Text',
                          abstract='Copy of input text file.',
                          as_reference=False,
                          supported_formats=[
                              FORMATS.TEXT,
                          ]),
            ComplexOutput('dataset',
                          'Dataset',
                          abstract='Copy of input netcdf file.',
                          as_reference=True,
                          supported_formats=[FORMATS.NETCDF, FORMATS.TEXT]),
            BoundingBoxOutput('bbox', 'Bounding Box', crss=['epsg:4326']),
        ]

        super(InOut, self).__init__(
            self._handler,
            identifier="inout",
            title="In and Out",
            version="1.0",
            abstract="Testing all WPS input and output parameters.",
            # profile=['birdhouse'],
            metadata=[
                Metadata('Birdhouse', 'http://bird-house.github.io/'),
                Metadata(
                    'User Guide',
                    'http://emu.readthedocs.io/en/latest/',
                    role=
                    'http://www.opengis.net/spec/wps/2.0/def/process/description/documentation'
                )
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
Esempio n. 27
0
    def __init__(self):
        """Declare the WPS interface of the quantile-bias diagnostic."""
        self.variables = ['pr']
        self.frequency = 'mon'

        ref_dataset = LiteralInput('ref_dataset',
                                   'Reference Dataset',
                                   abstract='Choose a reference dataset like GPCP-SG.',
                                   data_type='string',
                                   allowed_values=['GPCP-SG'],
                                   default='GPCP-SG',
                                   min_occurs=1,
                                   max_occurs=1)
        perc_lev = LiteralInput('perc_lev',
                                'Quantile',
                                abstract='Quantile in percentage (%).',
                                data_type='integer',
                                default=75)
        inputs = [
            *model_experiment_ensemble(model='MPI-ESM-P',
                                       experiment='historical',
                                       ensemble='r1i1p1',
                                       max_occurs=1,
                                       required_variables=self.variables,
                                       required_frequency=self.frequency),
            *year_ranges((1997, 1997)),
            ref_dataset,
            perc_lev,
        ]

        model_out = ComplexOutput(
            'model',
            'Model Quantile Data',
            abstract='Generated output data of ESMValTool processing.',
            as_reference=True,
            supported_formats=[FORMATS.NETCDF])
        archive_out = ComplexOutput(
            'archive',
            'Archive',
            abstract='The complete output of the ESMValTool processing as a zip archive.',
            as_reference=True,
            supported_formats=[Format('application/zip')])
        outputs = [model_out, archive_out, *default_outputs()]

        super(QuantileBias, self).__init__(
            self._handler,
            identifier="quantile_bias",
            title="Quantile Bias",
            version=runner.VERSION,
            abstract="""Diagnostic showing the quantile bias between models and a reference dataset.""",
            metadata=[
                Metadata('ESMValTool', 'http://www.esmvaltool.org/'),
                Metadata(
                    'Documentation',
                    'https://esmvaltool.readthedocs.io/en/version2_development/recipes/recipe_quantilebias.html',
                    role=util.WPS_ROLE_DOC,
                ),
                Metadata('Estimated Calculation Time', '1 Minute'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True)
    def __init__(self):
        """Declare the WPS interface for generating precipitation-as-snow files."""
        # Percentage reported to the WPS status document at each stage.
        self.status_percentage_steps = {
            "start": 0,
            "dry_run": 5,
            "process": 10,
            "collect_files": 90,
            "build_output": 95,
            "complete": 100,
        }

        # The three required NetCDF/OPeNDAP inputs share the same shape.
        nc_inputs = [
            ComplexInput(
                ident,
                title,
                abstract=f"{title} file to process",
                min_occurs=1,
                max_occurs=1,
                supported_formats=[FORMATS.NETCDF, FORMATS.DODS],
            )
            for ident, title in (
                ("prec", "Precipitation"),
                ("tasmin", "Tasmin"),
                ("tasmax", "Tasmax"),
            )
        ]
        inputs = nc_inputs + [
            LiteralInput(
                "chunk_size",
                "Chunk Size",
                default=100,
                abstract="Number of time slices to be read/written at a time",
                data_type="integer",
            ),
            LiteralInput(
                "output_file",
                "Output File",
                default="None",
                abstract="Optional custom name of output file",
                data_type="string",
            ),
            log_level,
            dryrun_input,
        ]
        outputs = [nc_output, dryrun_output]

        super(GeneratePrsn, self).__init__(
            self._handler,
            identifier="generate_prsn",
            title="Generate Precipitation as Snow",
            abstract="Generate precipitation as snow file from precipitation "
                     "and minimum/maximum temperature data",
            metadata=[
                Metadata("NetCDF processing"),
                Metadata("Climate Data Operations"),
            ],
            inputs=inputs,
            outputs=outputs,
            store_supported=True,
            status_supported=True,
        )
Esempio n. 29
0
###############################################################################

__author__ = 'Jachym Cepicky'

from pywps import Process, LiteralInput, ComplexOutput, ComplexInput, Format
from pywps.app.Common import Metadata
from pywps.validator.mode import MODE
from pywps.inout.formats import FORMATS
from pywps.response.status import WPS_STATUS

# Module-level WPS interface definitions consumed by the process class below.

# Vector-map input; MODE.STRICT makes PyWPS validate incoming data against
# the declared GML format before the handler runs.
inpt_vector = ComplexInput('vector',
                           'Vector map',
                           supported_formats=[Format('application/gml+xml')],
                           mode=MODE.STRICT)

# Buffer distance around the vector features (units depend on the map CRS —
# TODO confirm against the handler).
inpt_size = LiteralInput('size', 'Buffer size', data_type='float')

# Buffered geometry returned to the client as GML.
out_output = ComplexOutput('output',
                           'HelloWorld Output',
                           supported_formats=[Format('application/gml+xml')])

# Input/output lists handed to the Process constructor.
inputs = [inpt_vector, inpt_size]
outputs = [out_output]


class DemoBuffer(Process):
    def __init__(self):

        super(DemoBuffer, self).__init__(
            _handler,
            identifier='demobuffer',
Esempio n. 30
0
    def __init__(self):
        """Declare the WPS interface of the EO fetch-and-merge process.

        Inputs select the EO product, spatio-temporal extent and credentials;
        outputs are the merged-geotiff archive, a quick-look PNG and the logs.
        """
        # --- inputs ------------------------------------------------------
        products = LiteralInput(
            "products", "Earth Observation Product",
            data_type='string',
            abstract="Choose Earth Observation Products (up to five)",
            default="PSScene3Band__visual",
            min_occurs=1, max_occurs=1,
            allowed_values=_EODATA_)

        bbox = LiteralInput(
            'BBox', 'Bounding Box',
            data_type='string',
            abstract="Enter a bbox: min_lon, max_lon, min_lat, max_lat."
                     " min_lon=Western longitude,"
                     " max_lon=Eastern longitude,"
                     " min_lat=Southern or northern latitude,"
                     " max_lat=Northern or southern latitude."
                     " For example: -80,50,20,70",
            default='14,15,8,9',
            min_occurs=1, max_occurs=1)

        # Default search window: the 30 days up to today (evaluated once,
        # when the process description is built).
        start = LiteralInput(
            'start', 'Start Date',
            data_type='date',
            abstract='First day of the period to be searched for EO data.'
                     '(if not set, 30 days befor end of period will be selected',
            default=(dt.now() - timedelta(days=30)).strftime('%Y-%m-%d'),
            min_occurs=0, max_occurs=1)

        end = LiteralInput(
            'end', 'End Date',
            data_type='date',
            abstract='Last day of the period to be searched for EO data.'
                     '(if not set, current day is set.)',
            default=dt.now().strftime('%Y-%m-%d'),
            min_occurs=0, max_occurs=1)

        token = LiteralInput(
            'token', 'Authentification',
            data_type='string',
            abstract='Authentification token generated by Planet Earth Observation Explorer.',
            min_occurs=1, max_occurs=1)

        archive_format = LiteralInput(
            "archive_format", "Archive format",
            data_type='string',
            abstract="Result files will be compressed into archives.\
                                  Choose an appropriate format",
            default="tar",
            min_occurs=1, max_occurs=1,
            allowed_values=['zip', 'tar'])

        # --- outputs -----------------------------------------------------
        archive_out = ComplexOutput(
            "output_archive", "geotif files",
            abstract="Archive (tar/zip) containing merged geotif",
            supported_formats=[Format('application/x-tar'),
                               Format('application/zip')],
            as_reference=True)

        plot_out = ComplexOutput(
            'output_png', 'Example plot',
            abstract="Example plot of a geotif file for quickcheck purpose.",
            supported_formats=[Format('image/png')],
            as_reference=True)

        log_out = ComplexOutput(
            "output_log", "Logging information",
            abstract="Collected logs during process run.",
            supported_formats=[Format("text/plain")],
            as_reference=True)

        super(MergeProcess, self).__init__(
            self._handler,
            identifier="EO_merge",
            title="Earth Observation merge all file per day",
            version="0.1",
            abstract="Fetch EO Data and merge the tiles of the same day to a mosaic",
            metadata=[
                Metadata('Documentation',
                         'http://flyingpigeon.readthedocs.io/en/latest/'),
            ],
            inputs=[products, bbox, start, end, token, archive_format],
            outputs=[archive_out, plot_out, log_out],
            status_supported=True,
            store_supported=True,
        )
Esempio n. 31
0
    def __init__(self):
        """Declare the watershed-delineation WPS interface.

        Inputs: outlet coordinates (latitude/longitude), a watershed name,
        and optional DEM / flow-direction rasters (HydroSheds by default).
        Output: the watershed boundary polygon as GML.
        """
        inputs = [
            LiteralInput(
                "latitude",
                "Outlet latitude",
                data_type="float",
                abstract="Latitudinal coordinate of the watershed outlet.",
            ),
            LiteralInput(
                "longitude",
                "Outlet longitude",
                data_type="float",
                abstract="Longitudinal coordinate of the watershed outlet.",
            ),
            LiteralInput(
                "name",
                "Watershed name",
                data_type="string",
                abstract="Name of the watershed.",
            ),
            ComplexInput(
                "dem",
                "Digital Elevation Model",
                abstract="An URL pointing at the DEM to be used to compute the watershed boundary. Defaults "
                "to the HydroSheds DEM.",  # TODO: Include details (resolution, version).
                metadata=[
                    Metadata("HydroSheds Database", "http://hydrosheds.org"),
                    Metadata(
                        "Lehner, B., Verdin, K., Jarvis, A. (2008): New global "
                        "hydrography derived from "
                        "spaceborne elevation data. Eos, Transactions, AGU, 89(10): 93-94.",
                        "https://doi.org/10.1029/2008EO100001",
                    ),
                ],
                min_occurs=0,
                default="",  # TODO: Enter default DEM from PAVICS
                supported_formats=[FORMATS.GEOTIFF, FORMATS.GML, FORMATS.WCS],
            ),
            ComplexInput(
                "dir",
                "Flow direction grid",
                abstract="An URL pointing at the flow direction grid to be used to compute the watershed "
                "boundary. Defaults to the HydroSheds product. If both the DEM and the flow "
                "direction are give, the flow direction supercedes the DEM.",
                # TODO: Include details (resolution, version).
                metadata=[
                    Metadata("HydroSheds Database", "http://hydrosheds.org"),
                    Metadata(
                        "Lehner, B., Verdin, K., Jarvis, A. (2008): New global "
                        "hydrography derived from "
                        "spaceborne elevation data. Eos, Transactions, AGU, 89(10): 93-94.",
                        "https://doi.org/10.1029/2008EO100001",
                    ),
                ],
                min_occurs=0,
                default="",  # TODO: Enter default DIR from PAVICS
                supported_formats=[FORMATS.GEOTIFF, FORMATS.GML, FORMATS.WCS],
            ),
        ]
        outputs = [
            ComplexOutput(
                "boundary",
                "Watershed boundary",
                abstract="A polygon defining the watershed boundary.",
                as_reference=True,
                # FIX: supported_formats must be a *list* of formats; a bare
                # FORMATS.GML was passed here, unlike every other output in
                # this file, and PyWPS iterates over this argument.
                supported_formats=[FORMATS.GML],
            ),
        ]

        super(WatershedDelineation, self).__init__(
            self._pysheds_handler,
            identifier="watershed_delineation",
            title="Watershed delineation algorithm",
            version="1.0",
            abstract="Return the boundary of a watershed computed using a digital elevation model.",
            metadata=[],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
Esempio n. 32
0
    def __init__(self):
        """Declare the WPS interface for the NetCDF-to-plotly process.

        Takes an OPeNDAP URL plus a variable name, and optional slicing
        indices for the time, level and two spatial dimensions; returns a
        JSON document with the data for a simple plotly time series.
        """
        # Should specify whether indices are inclusive or exclusive.
        inputs = [
            LiteralInput('opendap_url',
                         'OPeNDAP url to a NetCDF file',
                         abstract='OPeNDAP url to a NetCDF file.',
                         data_type='string'),
            LiteralInput('variable_name',
                         'Variable name in the NetCDF file',
                         abstract='Variable name in the NetCDF file.',
                         data_type='string'),
        ]
        # One optional (initial, final) index pair per dimension; generated
        # in a loop instead of eight copy-pasted LiteralInput blocks.
        for dim, label in (('time', 'time'),
                           ('level', 'level'),
                           ('spatial1', 'first spatial'),
                           ('spatial2', 'second spatial')):
            for position in ('initial', 'final'):
                title = '%s indice for %s dimension' % (position.capitalize(),
                                                        label)
                inputs.append(LiteralInput('%s_%s_indice' % (dim, position),
                                           title,
                                           abstract=title + '.',
                                           data_type='integer',
                                           min_occurs=0))

        # as_reference is passed to the constructor rather than mutated on
        # the instance afterwards (matches the style of sibling processes).
        outputs = [ComplexOutput('plotly_result',
                                 'Plotly result',
                                 abstract='Plotly result.',
                                 supported_formats=[json_format],
                                 as_reference=True)]

        super(NCPlotly, self).__init__(
            self._handler,
            identifier='ncplotly',
            title='Plotly time series data from netCDF file.',
            abstract='Return a dictionary storing the data necessary to create a simple plotly time series.',
            version='0.1',
            inputs=inputs,
            outputs=outputs,
            store_supported=True,
            status_supported=True)
Esempio n. 33
0
    def __init__(self):
        """Describe the NAME plotting process: inputs, outputs and metadata."""
        inputs = [
            LiteralInput('filelocation',
                         'NAME run ID',
                         data_type='string',
                         abstract='Run ID that identifies the NAME output files'),
            LiteralInput('summarise',
                         'Summarise data',
                         data_type='string',
                         abstract='Plot summaries of each day/week/month',
                         allowed_values=['NA', 'day', 'week', 'month', 'all'],
                         default='NA'),
            LiteralInput('timestamp',
                         'Plot specific date and time',
                         data_type='dateTime',
                         abstract='Plot only a specific timestamp. Excludes the '
                                  'creation of summary plots. Format: YYYY-MM-DD HH:MM:SSZ',
                         min_occurs=0),
            LiteralInput('station',
                         'Mark release location',
                         data_type='boolean',
                         abstract='Mark the location of release onto the image',
                         min_occurs=0),
            LiteralInput('projection',
                         'Projection',
                         data_type='string',
                         abstract='Map projection',
                         allowed_values=['cyl', 'npstere', 'spstere'],
                         min_occurs=0),
        ]

        # bbox not working:
        # BoundingBoxInput('domain', 'Computational Domain', crss=['epsg:4326'],
        #                  abstract='Coordinates to plot within',
        #                  min_occurs=0),
        # Temporary bbox solution: four plain literal bounds, generated in a
        # loop since they differ only in identifier, label and default.
        bounds_note = (' for plot boundary. Note that reducing the size of the '
                       'bounds will speed up the run-time of the process.')
        for ident, label, default in (('min_lon', 'Minimum longitude', -180),
                                      ('max_lon', 'Maximum longitude', 180),
                                      ('min_lat', 'Minimum latitude', -90),
                                      ('max_lat', 'Maximum latitude', 90)):
            inputs.append(LiteralInput(ident,
                                       label,
                                       abstract=label + bounds_note,
                                       data_type='float',
                                       default=default,
                                       min_occurs=1))

        inputs.append(LiteralInput('scale',
                                   'Particle concentration scale',
                                   data_type='string',
                                   abstract='Particle concentration scale. If no value '
                                            'is set, it will autoscale. Format: Min,Max',
                                   min_occurs=0))
        inputs.append(LiteralInput('colormap',
                                   'Colour map',
                                   data_type='string',
                                   abstract='Matplotlib color map name',
                                   default='coolwarm',
                                   min_occurs=0,
                                   allowed_values=['coolwarm', 'viridis', 'rainbow']))

        outputs = [
            ComplexOutput('FileContents',
                          'Plot file(s)',
                          abstract='Plot files',
                          supported_formats=[Format('application/x-zipped-shp'),
                                             Format('text/plain'),
                                             Format('image/png'),
                                             FORMATS.GEOTIFF],
                          as_reference=True),
        ]

        super(PlotNAME, self).__init__(
            self._handler,
            identifier='plot_name',
            title='Plot NAME results',
            abstract='Generate plots from a completed NAME job.',
            version='0.1',
            metadata=[
                Metadata('NAME-on-JASMIN guide',
                         'http://jasmin.ac.uk/jasmin-users/stories/processing/'),
                Metadata('Colour maps',
                         'https://matplotlib.org/users/colormaps.html'),
                Metadata('Process image',
                         'https://name-staging.ceda.ac.uk/static/phoenix/img/NAME_banner_dark.png',
                         'http://www.opengis.net/spec/wps/2.0/def/process/description/media'),
            ],
            inputs=inputs,
            outputs=outputs,
            store_supported=True,
            status_supported=True)
Esempio n. 34
0
    def __init__(self):
        """Describe the CMIP5 cutout process (UK-domain extraction)."""
        # Integer bounding-box inputs: they differ only in identifier,
        # label and default, so build them in a loop.
        inputs = []
        for ident, label, default in (('min_lon', 'Minimum longitude', -180),
                                      ('max_lon', 'Maximum longitude', 180),
                                      ('min_lat', 'Minimum latitude', -90),
                                      ('max_lat', 'Maximum latitude', 90)):
            inputs.append(LiteralInput(ident,
                                       label,
                                       data_type='integer',
                                       default=default,
                                       min_occurs=1))

        inputs.append(LiteralInput(
            'variable',
            'Variable',
            abstract='Choose a variable like vas (northward near-Surface wind).',
            data_type='string',
            allowed_values=['pr', 'tas', 'tasmax', 'tasmin', 'vas', 'uas'],
            default='tas'))
        inputs.append(LiteralInput(
            'model',
            'Model',
            abstract='Choose a model like HadGEM2-ES.',
            data_type='string',
            allowed_values=['HadGEM2-ES', 'HadCM3', 'GFDL-CM2p1',
                            'bcc-csm1-1-m', 'bcc-csm1-1', 'BNU-ESM'],
            default='HadCM3'))

        outputs = [ComplexOutput('output',
                                 'NetCDF file',
                                 abstract='A single NetCDF file.',
                                 as_reference=True,
                                 supported_formats=[FORMATS.NETCDF])]

        super(Exercise, self).__init__(
            self._handler,
            identifier='get_cutout',
            title='Get CMIP5 RCP45 2010s average cutout',
            abstract='WPS process for extracting UK domain from CMIP5 data',
            metadata=[
                Metadata('PyWPS', 'https://pywps.org/'),
                Metadata('Birdhouse', 'http://bird-house.github.io/'),
                Metadata('PyWPS Demo',
                         'https://pywps-demo.readthedocs.io/en/latest/'),
                Metadata('Emu: PyWPS examples',
                         'https://emu.readthedocs.io/en/latest/'),
            ],
            version='0.1',
            inputs=inputs,
            outputs=outputs,
            store_supported=True,
            status_supported=True)