attributes = pyGDP.getAttributes(shapefile)
for attr in attributes:
    print(attr)

# Grab the values from the STATE attribute of sample:CONUS_States
usr_attribute = 'STATE'
values = pyGDP.getValues(shapefile, usr_attribute)
for v in values:
    print(v)

# Choose Delaware
# Note that removing "value" in the main request below will run all values in the shapefile.
value = ['Delaware']

# Search for datasets
dataSetURIs = pyGDP.getDataSetURI(anyText='wicci')
for dataset in dataSetURIs:
    print(dataset)

# Loop through datasets of interest, in this case the first three OPeNDAP urls.
for dataSetURI in dataSetURIs[1][2][0:3]:
    # Get the available data types associated with the dataSetURI
    dataTypes = pyGDP.getDataType(dataSetURI)
    print(dataTypes)

    # For this example just run the first dataType. This dataType list should be modified if multiple
    # datatypes are required.
    dataType = dataTypes[0]
    print(dataType)

    # Get available time range for the dataset.
    timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
    print(timeRange)

# Grab the attributes of the uploaded shapefile 'upload:OKCNTYD'
OKshapefile = 'upload:OKCNTYD'
attributes = pyGDP.getAttributes(OKshapefile)
for attr in attributes:
    print attr
print

# Grab the values of the 'OBJECTID' attribute from 'upload:OKCNTYD'
usr_attribute = 'OBJECTID'
values = pyGDP.getValues(OKshapefile, usr_attribute)
for v in values:
    print v
print

# We set our value to 5
usr_value = 5

# Our shapefile = 'upload:OKCNTYD', usr_attribute = 'OBJECTID', and usr_value = 5.
# Search for the dataset URI we are interested in.
dataSetURIs = pyGDP.getDataSetURI(anyText='prism')
import pprint
pp = pprint.PrettyPrinter(indent=5, width=60)
pp.pprint(dataSetURIs)
print
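
# getDataSetURI returns a nested list of search records; in these examples the
# OPeNDAP/dods URLs are pulled out by position (see the [1][2] indexing used
# elsewhere in this file). The exact index depends on the search results, so
# check it against the pprint output above. A minimal sketch:
opendap_urls = dataSetURIs[1][2]
for url in opendap_urls:
    print url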

# Set our datasetURI
dataSetURI = 'dods://cida.usgs.gov/thredds/dodsC/prism'
# Choose the data type we are interested in: 'ppt' (precipitation)
dataType = 'ppt'
# Get available time range on the dataset
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print t
timeBegin = '1900-01-01T00:00:00.000Z'
timeEnd = '1901-01-01T00:00:00.000Z'
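
# The pieces above (shapefile, attribute, value, dataset, data type, and time
# window) are everything a processing request needs. A minimal sketch of that
# request, assuming pyGDP's submitFeatureWeightedGridStatistics call as used
# elsewhere in these examples:
outputPath = pyGDP.submitFeatureWeightedGridStatistics(OKshapefile, dataSetURI, dataType,
                                                       timeBegin, timeEnd,
                                                       usr_attribute, usr_value)
print outputPath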
print
shapefile = 'sample:CONUS_States'
attributes = pyGDP.getAttributes(shapefile)
for attr in attributes:
    print attr
print

# Grab the values from the STATE attribute of sample:CONUS_States
usr_attribute = 'STATE'
values = pyGDP.getValues(shapefile, usr_attribute)
for v in values:
    print v
print

# With no search text, getDataSetURI() lists every available dataset URI.
dataSetURIs = pyGDP.getDataSetURI()
for d in dataSetURIs:
    print d

# Set our datasetURI
dataSetURI = 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/gmo/GMO_w_meta.ncml'
# Choose the data type we are interested in: 'Prcp' (precipitation)
dataType = 'Prcp'
# Get available time range on the dataset
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print t

"""
Instead of submitting in a value, we submit a list of gmlIDs associated
with either a small portion of that value, or multiple values.
attributes = pyGDP.getAttributes(shapefile)
for attr in attributes:
    print attr


# Grab the values from the STATE attribute of sample:CONUS_States
usr_attribute = 'STATE'
values = pyGDP.getValues(shapefile, usr_attribute)
for v in values:
    print v

# Choose Colorado
value = ['Colorado']

# Search for datasets
dataSetURIs = pyGDP.getDataSetURI(anyText='prism')
for dataset in dataSetURIs:
	print dataset

# Set our datasetURI to the OPeNDAP/dods response for the prism dataset.
dataSetURI = 'dods://cida.usgs.gov/thredds/dodsC/prism'

# Get the available data types associated with the dataset
dataTypes = pyGDP.getDataType(dataSetURI)
for dataType in dataTypes:
	print dataType

dataType = 'ppt'

# Get available time range for the dataset.
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
# Example 5
# Upload each zipped shapefile to the GDP server; skip any already there.
for shp in zipped_shapefiles:
    try:
        # upload; raises if a file with this name already exists on the server
        pyGDP.uploadShapeFile(shp)
        print "\nuploaded {} to server ".format(shp)
    except:
        print "\nshapefile {} found on server".format(shp)
        continue

shapefiles = ['upload:' + os.path.split(f)[1][:-4] for f in zipped_shapefiles]

shapefile = shapefiles[0] # for now just using one shapefile

# in this case, the station identifiers are just consecutive integers
values = pyGDP.getValues(shapefile, attribute)
#values = map(str,sorted(map(int, values))) # could enforce order, but doesn't seem to make a difference

# Search for datasets
print "\nGetting datasets and datatypes..."
dataSetURIs = pyGDP.getDataSetURI(anyText=URI_designator)
dataSetURIs = dataSetURIs[1][2] # this probably needs to be hard-coded based on results of line above
# get datasets that contain the specified URI names
dataSetURIs = [[d for d in dataSetURIs if os.path.split(d)[1] == n][0] for n in URI_names]

if len(dataSetURIs) > 0:
    print '\nFound:'
    for n in dataSetURIs:
        print '{}'.format(n)

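# 'restart', 'download_datasets', and 'rec_URIs' are defined earlier in the
# original script; the hypothetical sketch below stands in for that bookkeeping
# so the block that follows can run. Assumed convention: a record file listing
# one finished dataset URI per line.
import os
recfile = 'processed_URIs.txt'        # hypothetical record-file name
download_datasets = 'individually'    # hypothetical setting; or 'together'
restart = os.path.exists(recfile)
rec_URIs = []
if restart:
    with open(recfile) as f:
        rec_URIs = [line.strip() for line in f if line.strip()]
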
# in case of restart, trim already-processed entries from datasets
if restart:
    if download_datasets == 'individually':
        rec_URIs.pop() # if downloading together and last URI is in recfile, the dataset downloaded OK
    dataSetURIs = [d for d in dataSetURIs if d not in rec_URIs]

# Grab the values from the STATE attribute of sample:CONUS_States
shapefile = 'sample:CONUS_States'
usr_attribute = 'STATE'
values = pyGDP.getValues(shapefile, usr_attribute)
for v in values:
    print v
print

"""
Instead of specifically specifying a value, we get request to get
the gmlID of these values and append them to a gmlID to be used
as an input instead of value.
"""
wisGMLID = pyGDP.getGMLIDs(shapefile, usr_attribute, 'Wisconsin')
michGMLID = pyGDP.getGMLIDs(shapefile, usr_attribute, 'Michigan')
minnGMLID = pyGDP.getGMLIDs(shapefile, usr_attribute, 'Minnesota')
gmlIDs = wisGMLID + michGMLID + minnGMLID

# With no search text, getDataSetURI() lists every available dataset URI.
dataSetURIs = pyGDP.getDataSetURI()
for d in dataSetURIs:
    print d

# Set our datasetURI
dataSetURI = 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/gmo/GMO_w_meta.ncml'
# Choose the data type we are interested in: 'Prcp' (precipitation)
dataType = 'Prcp'
# Get available time range on the dataset
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print t
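
# The gmlIDs built above can stand in for a value in the processing request,
# as the docstrings in these examples describe. A minimal sketch, assuming the
# gmlID list is accepted through the value argument of
# submitFeatureWeightedGridStatistics:
outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, dataType,
                                                       timeRange[0], timeRange[1],
                                                       usr_attribute, gmlIDs)
print outputPath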

"""
Instead of submitting in a value, we submit a list of gmlIDs associated
with either a small portion of that value, or multiple values.
for attr in attributes:
    print attr


# Grab the values from the STATE attribute of sample:CONUS_States
usr_attribute = 'STATE'
values = pyGDP.getValues(shapefile, usr_attribute)
for v in values:
    print v

# Choose Delaware
# Note that removing "value" in the main request below will run all values in the shapefile.
value = ['Delaware']

# Search for datasets
dataSetURIs = pyGDP.getDataSetURI(anyText='wicci')
for dataset in dataSetURIs:
	print dataset

# Loop through datasets of interest, in this case the first three OPeNDAP urls. 
for dataSetURI in dataSetURIs[1][2][0:3]:
	# Get the available data types associated with the dataSetURI
	dataTypes = pyGDP.getDataType(dataSetURI)
	print dataTypes
	# For this example just run the first dataType. This dataType list should be modified if multiple datatypes are required.
	dataType = dataTypes[0]
	print dataType
	# Get available time range for the dataset.
	timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
	print timeRange
	# Execute a FeatureWeightedGridStatistics request and return the path to the output file.
	outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, dataType,
	                                                       timeRange[0], timeRange[1],
	                                                       usr_attribute, value)
	print outputPath
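	# Per the note above, leaving value out of the request (so it falls back to
	# its default) would run the statistics over every value in the shapefile
	# rather than just Delaware.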