Example #1
0
def submitCustomBioclim(processid="org.n52.wps.server.r.gridded_bioclim",
                        outputfname=None,
                        verbose=False,
                        **kwargs):
    '''Submit a WPS request to the R script that derives custom Bioclim
    variables.

    processid locates the wrapped R script on the WPS server.
        defaults to: org.n52.wps.server.r.gridded_bioclim
    outputfname is the path where the returned zip archive is saved.
    verbose toggles progress reporting.
    The remaining keyword arguments are:
        OPeNDAP_URI: a locator of a valid OPeNDAP dataset
            defaults to: https://cida.usgs.gov/thredds/dodsC/prism
        tmax_var: the name of the max temperature variable
        tmin_var: the name of the min temperature variable
        prcp_var: the name of the precipitation variable
        tave_var: the name of the average temperature variable
            defaults to "Null"
        bioclims: a list of integers (1-19) of the bioclim vars to return
        start: the year to begin with
        end: the year to end with
        bbox_in: the bounding box to use, a four item tuple
            (max long, min lat, min long, max lat)
    '''
    # Validate first so bad input fails before any request is built.
    _validate_bioclim_inputs(outputfname, verbose=verbose, **kwargs)
    parsed_inputs = _parse_bioclim_inputs(**kwargs)
    return _execute_request._executeRequest(processid, parsed_inputs, "name",
                                            verbose, outputfname)
Example #2
0
def submitCustomBioclim(processid="org.n52.wps.server.r.gridded_bioclim",
                        outputfname=None, verbose=False, **kwargs):
    '''Call the WPS algorithm wrapping the R script that generates custom
    Bioclim variables.

    processid is the locator of the R script on the server
        (defaults to org.n52.wps.server.r.gridded_bioclim).
    outputfname is the path to save the returned zip to.
    verbose enables progress output.
    Remaining keyword arguments:
        OPeNDAP_URI: locator of a valid OPeNDAP dataset
            (defaults to http://cida.usgs.gov/thredds/dodsC/prism)
        tmax_var: name of the max temperature variable
        tmin_var: name of the min temperature variable
        prcp_var: name of the precipitation variable
        tave_var: name of the average temperature variable
            (defaults to "Null")
        bioclims: list of integers (1-19) of the bioclim vars to return
        start: the year to begin with
        end: the year to end with
        bbox_in: bounding box as a four item tuple
            (max long, min lat, min long, max lat)
    '''
    _validate_bioclim_inputs(outputfname, verbose=verbose, **kwargs)
    wps_inputs = _parse_bioclim_inputs(**kwargs)
    result = _execute_request._executeRequest(
        processid, wps_inputs, "name", verbose, outputfname)
    return result
Example #3
0
def submitFeatureCoverageWCSIntersection(geoType,
                                         dataSetURI,
                                         varID,
                                         attribute='the_geom',
                                         value=None,
                                         gmlIDs=None,
                                         verbose=False,
                                         coverage='true',
                                         WFS_URL=None,
                                         outputfname=None,
                                         sleepSecs=10):
    """
    Makes a featureCoverageWCSIntersection algorithm call. 
    """

    # Build the feature collection first; bail out if it cannot be resolved.
    feature_coll = _get_geotype._getFeatureCollectionGeoType(
        geoType, attribute, value, gmlIDs, WFS_URL)
    if feature_coll is None:
        return None

    algorithm_id = 'gov.usgs.cida.gdp.wps.algorithm.FeatureCoverageIntersectionAlgorithm'
    wps_inputs = [
        ("DATASET_URI", dataSetURI),
        ("DATASET_ID", varID),
        ("REQUIRE_FULL_COVERAGE", coverage),
        ("FEATURE_COLLECTION", feature_coll),
    ]
    return _execute_request._executeRequest(algorithm_id, wps_inputs, "OUTPUT",
                                            verbose, outputfname, sleepSecs)
Example #4
0
def submitFeatureCoverageWCSIntersection(geoType, dataSetURI, varID, attribute='the_geom', value=None, gmlIDs=None, verbose=False, coverage='true', WFS_URL=None, outputfname=None, sleepSecs=10):
    """
    Makes a featureCoverageWCSIntersection algorithm call. 
    """

    # Resolve the feature collection; nothing to submit without one.
    fc = _get_geotype._getFeatureCollectionGeoType(geoType, attribute, value,
                                                   gmlIDs, WFS_URL)
    if fc is None:
        return None

    processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureCoverageIntersectionAlgorithm'
    request_inputs = [("DATASET_URI", dataSetURI),
                      ("DATASET_ID", varID),
                      ("REQUIRE_FULL_COVERAGE", coverage),
                      ("FEATURE_COLLECTION", fc)]
    return _execute_request._executeRequest(processid, request_inputs,
                                            "OUTPUT", verbose, outputfname,
                                            sleepSecs)
Example #5
0
def submitFeatureWeightedGridStatistics(geoType, dataSetURI, varID, startTime,
                                        endTime, attribute, value, gmlIDs,
                                        verbose, coverage, delim, stat, grpby,
                                        timeStep, summAttr, weighted, WFS_URL,
                                        outputfname, sleepSecs):
    """
    Makes a featureWeightedGridStatistics algorithm call. 
    The web service interface implemented is summarized here: 
    https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features
    
    Note that varID and stat can be a list of strings.
    
    Returns the result of _executeRequest, or None when the feature
    collection cannot be built.
    """
    allowed_stats = ("MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV",
                     "SUM", "COUNT")

    # test for dods:
    dataSetURI = _execute_request.dodsReplace(dataSetURI)

    log.info('Generating feature collection.')

    featureCollection = _get_geotype._getFeatureCollectionGeoType(
        geoType, attribute, value, gmlIDs, WFS_URL)
    if featureCollection is None:
        return

    processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
    # Loose equality kept deliberately: 0 (as well as False) selects the
    # unweighted algorithm, while None/'' keep the weighted default.
    if weighted == False:  # noqa: E712
        processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureGridStatisticsAlgorithm'

    solo_inputs = [("FEATURE_ATTRIBUTE_NAME", attribute),
                   ("DATASET_URI", dataSetURI), ("TIME_START", startTime),
                   ("TIME_END", endTime),
                   ("REQUIRE_FULL_COVERAGE", str(coverage).lower()),
                   ("DELIMITER", delim), ("GROUP_BY", grpby),
                   ("SUMMARIZE_TIMESTEP", str(timeStep).lower()),
                   ("SUMMARIZE_FEATURE_ATTRIBUTE", str(summAttr).lower()),
                   ("FEATURE_COLLECTION", featureCollection)]

    # Normalize scalars to one-element lists so single-element lists behave
    # the same as scalars.  Previously stat=['MEAN'] hit the scalar branch
    # and raised, and varID=['x'] submitted the list object instead of 'x'.
    stats = stat if isinstance(stat, list) else [stat]
    if len(stats) > 7:
        raise Exception('Too many statistics were submitted.')
    varIDs = varID if isinstance(varID, list) else [varID]

    # Drop optional inputs the caller left as None.
    inputs = [pair for pair in solo_inputs if pair[1] is not None]

    for stat_in in stats:
        if stat_in not in allowed_stats:
            raise Exception(
                'The statistic %s is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"'
                % stat_in)
        inputs.append(("STATISTICS", stat_in))

    for var in varIDs:
        inputs.append(("DATASET_ID", var))

    output = "OUTPUT"

    return _execute_request._executeRequest(processid, inputs, output, verbose,
                                            outputfname, sleepSecs)
Example #6
0
def submitFeatureWeightedGridStatistics(geoType, dataSetURI, varID, startTime, endTime, attribute, value, gmlIDs,
                                        verbose, coverage, delim, stat, grpby, timeStep, summAttr, weighted, WFS_URL, outputfname, sleepSecs):
    """
    Makes a featureWeightedGridStatistics algorithm call. 
    The web service interface implemented is summarized here: 
    https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features
    
    Note that varID and stat can be a list of strings.
    
    Returns the result of _executeRequest, or None when the feature
    collection cannot be built.
    """
    allowed_stats = ("MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV",
                     "SUM", "COUNT")

    # test for dods:
    dataSetURI = _execute_request.dodsReplace(dataSetURI)

    log.info('Generating feature collection.')

    featureCollection = _get_geotype._getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs, WFS_URL)
    if featureCollection is None:
        return

    processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
    # Loose equality kept deliberately: 0 (as well as False) selects the
    # unweighted algorithm, while None/'' keep the weighted default.
    if weighted == False:  # noqa: E712
        processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureGridStatisticsAlgorithm'

    solo_inputs = [("FEATURE_ATTRIBUTE_NAME", attribute),
                   ("DATASET_URI", dataSetURI),
                   ("TIME_START", startTime),
                   ("TIME_END", endTime),
                   ("REQUIRE_FULL_COVERAGE", str(coverage).lower()),
                   ("DELIMITER", delim),
                   ("GROUP_BY", grpby),
                   ("SUMMARIZE_TIMESTEP", str(timeStep).lower()),
                   ("SUMMARIZE_FEATURE_ATTRIBUTE", str(summAttr).lower()),
                   ("FEATURE_COLLECTION", featureCollection)]

    # Normalize scalars to one-element lists so single-element lists behave
    # the same as scalars.  Previously stat=['MEAN'] hit the scalar branch
    # and raised, and varID=['x'] submitted the list object instead of 'x'.
    stats = stat if isinstance(stat, list) else [stat]
    if len(stats) > 7:
        raise Exception('Too many statistics were submitted.')
    varIDs = varID if isinstance(varID, list) else [varID]

    # Drop optional inputs the caller left as None.
    inputs = [pair for pair in solo_inputs if pair[1] is not None]

    for stat_in in stats:
        if stat_in not in allowed_stats:
            raise Exception('The statistic %s is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"' % stat_in)
        inputs.append(("STATISTICS", stat_in))

    for var in varIDs:
        inputs.append(("DATASET_ID", var))

    output = "OUTPUT"

    return _execute_request._executeRequest(processid, inputs, output, verbose, outputfname, sleepSecs)