Example #1
def submit_request(shapefile, URI, d, timeStart, timeEnd, attribute, values, ofp, mode='individually'):
    '''
    calls pyGDP.submitFeatureWeightedGridStatistics()
    either with single dataset, or list of datasets (depending on mode)
    resubmits after waiting in the case of a server error
    '''

    if mode == 'together':
        outfile = os.path.join(outpath, os.path.split(URI)[1] + '.csv')
        record = ',all datasets written to {}\n'.format(outfile)
    else:
        outfile = os.path.join(outpath, d+'_'+URI[-5:]+'.csv')
        record = ',{}\n'.format(d)

    done = False

    # while loop to wait and then resubmit if server/network fails
    # for some reason this appears to work after an exception (the files keep coming in with the right names), but printing to the output screen stops
    while not done:
        try:
            cida_handle = pyGDP.submitFeatureWeightedGridStatistics(shapefile, URI, d, timeStart, timeEnd, attribute, values)
            done = True
        except Exception, e:
            print e
            print traceback.format_exc()
            print "waiting {} min before trying again...".format(restart_submit_after)
            time.sleep(restart_submit_after * 60) # wait restart_submit_after minutes before trying again
            print "resubmitting..."
        else:
            ofp.write(cida_handle + record)
            shutil.move(cida_handle, outfile)
            print "saved as: {}".format(outfile)
Example #2
def submit_request(shapefile,
                   URI,
                   d,
                   timeStart,
                   timeEnd,
                   attribute,
                   values,
                   ofp,
                   mode='individually'):
    '''
    calls pyGDP.submitFeatureWeightedGridStatistics()
    either with single dataset, or list of datasets (depending on mode)
    resubmits after waiting in the case of a server error
    '''

    if mode == 'together':
        outfile = os.path.join(outpath, os.path.split(URI)[1] + '.csv')
        record = ',all datasets written to {}\n'.format(outfile)
    else:
        outfile = os.path.join(outpath, d + '_' + URI[-5:] + '.csv')
        record = ',{}\n'.format(d)

    done = False

    # while loop to wait and then resubmit if server/network fails
    # for some reason this appears to work after an exception (the files keep coming in with the right names), but printing to the output screen stops
    while not done:
        try:
            cida_handle = pyGDP.submitFeatureWeightedGridStatistics(
                shapefile, URI, d, timeStart, timeEnd, attribute, values)
            done = True
        except Exception, e:
            print e
            print traceback.format_exc()
            print "waiting {} min before trying again...".format(
                restart_submit_after)
            time.sleep(restart_submit_after *
                       60)  # wait restart_submit_after minutes before trying again
            print "resubmitting..."
        else:
            ofp.write(cida_handle + record)
            shutil.move(cida_handle, outfile)
            print "saved as: {}".format(outfile)
Example #3
for v in values:
    print v

# Choose Colorado
value = ['Colorado']

# Search for datasets
dataSetURIs = pyGDP.getDataSetURI(anyText='prism')
for dataset in dataSetURIs:
    print dataset

# Set our datasetURI to the OPeNDAP/dods response for the prism dataset.
dataSetURI = 'dods://cida.usgs.gov/thredds/dodsC/prism'

# Get the available data types associated with the dataset
dataTypes = pyGDP.getDataType(dataSetURI)
for dataType in dataTypes:
    print dataType

dataType = 'ppt'

# Get available time range for the dataset.
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print t

# Execute a FeatureWeightedGridStatistics request and return the path to the output file.
# Note that this is for one time step but could be for multiple.
outputfile = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, dataType, timeRange[0], timeRange[0], usr_attribute, value, verbose=True)
print outputfile
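
# For the full available period, pass the end of the range (timeRange[1])
# instead of repeating timeRange[0]; a sketch, assuming getTimeRange returned
# [start, end] as printed above:
outputfile_full = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, dataType, timeRange[0], timeRange[1], usr_attribute, value, verbose=True)
print outputfile_full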
Example #4
# Grab the values from 'area_name' and 'sample:CSC_Boundaries'
usr_attribute = 'area_name'
values = pyGDP.getValues(shapefile, usr_attribute)
for v in values:
    print(v)

usr_value = 'Southwest'

dataSetURI = 'dods://cida.usgs.gov/thredds/dodsC/gmo/GMO_w_meta.ncml'
dataTypes = pyGDP.getDataType(dataSetURI)
for d in dataTypes:
    print(d)

dataType = 'Prcp'
# Get available time range on the dataset
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print(t)

timeBegin = '1960-01-01T00:00:00.000Z'
timeEnd = '1960-01-21T00:00:00.000Z'
outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile,
                                                       dataSetURI,
                                                       dataType,
                                                       timeBegin,
                                                       timeEnd,
                                                       usr_attribute,
                                                       usr_value,
                                                       gmlIDs=None,
                                                       verbose=True)
Example #5
values = pyGDP.getValues(shapefile, polyAttribute)

for v in values:
    print v

polyValue = 'BD_Mtn'

dataSetURI = 'http://cida.usgs.gov/thredds/dodsC/loca_historical'

datatypes = pyGDP.getDataType(dataSetURI)

outputpath_earlys=list()
DataTypes_list = ['pr_ACCESS1-0','pr_CanESM2', 'pr_CCSM4', 'pr_CESM1-BGC', 'pr_CMCC-CMS',\
                  'pr_CNRM-CM5', 'pr_GFDL-CM3', 'pr_HadGEM2-CC', \
                  'pr_HadGEM2-ES', 'pr_MIROC-ESM-CHEM', 'pr_MIROC-ESM', \
                  'pr_CSIRO-Mk3-6-0', 'pr_GFDL-ESM2M']


for Type_item in DataTypes_list:
    for datatype in datatypes:
        if Type_item in datatype:
            outName = 'Loca_Historical_' + datatype + '.csv'
            if os.path.isfile(outName):
                # already fetched on an earlier run; record the existing file name
                outputpath_earlys.append(outName)
            else:
                outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, datatype, '1950-01-01', '2005-12-31', polyAttribute, polyValue, outputfname=outName)
                outputpath_earlys.append(outputPath)
                print(outputPath)
        
       
Example #6
def main_func(curdir, set, region):
    
    def Region_lookup(region):
        region_properties = []
        if region == 'nhru':
            region_properties.append(371)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/51d35da1e4b0ca184833940c')
            region_properties.append('false')
        if region == 'R01':
            region_properties.append(2462)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5244735ae4b05b217bada04e')
            region_properties.append('false')
        if region == 'R02':
            region_properties.append(4827)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52696784e4b0584cbe9168ee')
            region_properties.append('false')
        if region == 'R03':
            region_properties.append(9899)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5283bd23e4b047efbbb57922')
            region_properties.append('false')
        if region == 'R04':
            region_properties.append(5936)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5284ff57e4b063f258e61b9d')
            region_properties.append('false')
        if region == 'R05':
            region_properties.append(7182)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/528516bbe4b063f258e62161')
            region_properties.append('false')
        if region == 'R06':
            region_properties.append(2303)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/51d75296e4b055e0afd5be2c')
            region_properties.append('true')
        if region == 'R07':
            region_properties.append(8205)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52851cd5e4b063f258e643dd')
            region_properties.append('true')
        if region == 'R08':
            region_properties.append(4449)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52854695e4b063f258e6513c')
            region_properties.append('true')
        if region == 'R10L':
            region_properties.append(8603)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/520031dee4b0ad2d97189db2')
            region_properties.append('true')
        elif region == 'R10U':
            region_properties.append(10299)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5216849ee4b0b45d6ba61e2e')
            region_properties.append('false')
        elif region == 'R11':
            region_properties.append(7373)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/51d1f9ebe4b08b18a62d586b')
            region_properties.append('true')
        elif region == 'R12':
            region_properties.append(7815)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5201328ae4b009d47a4c247a')
            region_properties.append('false')
        elif region == 'R13':
            region_properties.append(1958)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/51d752b9e4b055e0afd5be36')
            region_properties.append('false')
        elif region == 'R14':
            region_properties.append(3879)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52029c68e4b0e21cafa4b40c')
            region_properties.append('false')
        elif region == 'R15':
            region_properties.append(3441)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5285389ae4b063f258e64863')
            region_properties.append('false')
        elif region == 'R16':
            region_properties.append(2664)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52853f97e4b063f258e64875')
            region_properties.append('false')

        return region_properties
    
    def list_define(set):

        if set == 'cccma_cgmc3_1':
            return ['cccma_cgmc3'+str(scenario), 'http://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
                 'cccma_cgcm3_1-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
                 'cccma_cgcm3_1-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
                 'cccma_cgcm3_1-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
        elif set == 'cnrm_cm3':
            return ['cnrm_cm3'+str(scenario), 'http://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
             'cnrm_cm3-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
             'cnrm_cm3-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
             'cnrm_cm3-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
        elif set == 'gfdl_cm2_1':
            return ['gfdl_cm2_1'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
                 'gfdl_cm2_1-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
                 'gfdl_cm2_1-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
                 'gfdl_cm2_1-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
        elif set == 'ipsl_cm4':
            return ['ipsl_cm4'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
                 'ipsl_cm4-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
                 'ipsl_cm4-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
                 'ipsl_cm4-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
        elif set == 'miroc3_2_medres':
            return ['miroc3_2_medres'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
                 'miroc3_2_medres-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
                 'miroc3_2_medres-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
                 'miroc3_2_medres-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
        elif set == 'miub_echo_g':
            return ['miub_echo_g'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
                 'miub_echo_g-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
                 'miub_echo_g-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
                 'miub_echo_g-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
        elif set == 'mri_cgcm2_3_2a':
            return ['mri_cgcm2_3_2a'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
                 'mri_cgcm2_3_2a-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
                 'mri_cgcm2_3_2a-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
                 'mri_cgcm2_3_2a-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
        elif set == 'mpi_echam5':
            return ['mpi_echam5'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
                 'mpi_echam5-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
                 'mpi_echam5-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
                 'mpi_echam5-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
     
    global csvfile, vt_dataset, nhru, length, vt_datatype, url
    import pyGDP
    
    Region_return=Region_lookup(region)
    hrus = Region_return[0]
    nhru = hrus
    ScienceBase_URL= Region_return[1]
     
    pyGDP.WFS_URL = ScienceBase_URL
    url = pyGDP.WFS_URL
    pyGDP = pyGDP.pyGDPwebProcessing()
     
    # change working directory so the GDP output will be written there
     
    # Datasets and their properties
    # run 1, 2 , and 3
     
    #**********************************
    # run 1 only
    #scenario = 'a2'
    # other scenarios are 'a1b' and 'b1'
    #scenarios = ['a2','a1b','b1']
    scenarios = ['a2', 'a1b']
     
    timestart = '2046-01-15T00:00:00.000Z'
    timeend='2100-12-15T00:00:00.000Z'
     
    #**********************************
    # Datasets for each scenario - note that mpi_echam5 is not run for scenario a1b
    #a1b
    #data=[cccma_cgmc3_1,cnrm_cm3,gfdl_cm2_1,ipsl_cm4,miroc3_2_medres,miub_echo_g,mri_cgcm2_3_2a]
    #a2
    #data=[cccma_cgmc3_1,cnrm_cm3,gfdl_cm2_1,ipsl_cm4,miroc3_2_medres,miub_echo_g,mpi_echam5,mri_cgcm2_3_2a]
    #b1
    #data=[cccma_cgmc3_1,cnrm_cm3,gfdl_cm2_1,ipsl_cm4,miroc3_2_medres,miub_echo_g,mpi_echam5,mri_cgcm2_3_2a]
     
    
    # get list of shapefiles uploaded to the GDP
    shapefiles = pyGDP.getShapefiles()
    for shp in shapefiles:
        print shp
     
    # feature loaded from sciencebase
    #should shapefile be sb:SP_hru instead?
    shapefile = 'sb:nhru'
    user_attribute = 'hru_id_loc'
     
    user_value = None
     
    os.chdir(curdir)
    dir = os.getcwd()
     
    vt_data = list_define(set)
    vt_datatype = vt_data[5:] 
    
    #for scenario in scenarios:  
         
#     cnrm_cm3 = ['cnrm_cm3'+str(scenario), 'http://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
#          'cnrm_cm3-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
#          'cnrm_cm3-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
#          'cnrm_cm3-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
#      
#     gfdl_cm2_1 = ['gfdl_cm2_1'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
#              'gfdl_cm2_1-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
#              'gfdl_cm2_1-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
#              'gfdl_cm2_1-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
#      
#     ipsl_cm4 = ['ipsl_cm4'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
#              'ipsl_cm4-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
#              'ipsl_cm4-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
#              'ipsl_cm4-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
#      
#     mpi_echam5 = ['mpi_echam5'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
#              'mpi_echam5-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
#              'mpi_echam5-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
#              'mpi_echam5-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
#      
#     miroc3_2_medres = ['miroc3_2_medres'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
#              'miroc3_2_medres-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
#              'miroc3_2_medres-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
#              'miroc3_2_medres-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
#      
#     cccma_cgmc3_1 = ['cccma_cgmc3'+str(scenario), 'http://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
#              'cccma_cgcm3_1-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
#              'cccma_cgcm3_1-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
#              'cccma_cgcm3_1-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
#      
#     miub_echo_g = ['miub_echo_g'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
#              'miub_echo_g-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
#              'miub_echo_g-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
#              'miub_echo_g-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
#      
#     mri_cgcm2_3_2a = ['mri_cgcm2_3_2a'+str(scenario), 'dods://pcmdi8.llnl.gov/thredds/dodsC/bcca/BCCA_0.125deg-gregorian-sres-monthly', 'Monthly', timestart, timeend,\
#              'mri_cgcm2_3_2a-gregorian-sres'+str(scenario)+'-run1-pr-BCCA_0-125deg-monthly',
#              'mri_cgcm2_3_2a-gregorian-sres'+str(scenario)+'-run1-tasmax-BCCA_0-125deg-monthly',
#              'mri_cgcm2_3_2a-gregorian-sres'+str(scenario)+'-run1-tasmin-BCCA_0-125deg-monthly']
     
     
#         if scenario == 'a1b':
#             data=[cccma_cgmc3_1,cnrm_cm3,gfdl_cm2_1,ipsl_cm4,miroc3_2_medres,miub_echo_g,mri_cgcm2_3_2a]
#         elif scenario == 'a2':
#                 data=[cccma_cgmc3_1,cnrm_cm3,gfdl_cm2_1,ipsl_cm4,miroc3_2_medres,miub_echo_g,mpi_echam5,mri_cgcm2_3_2a]
    
    timestart = time.time()
    
    
    #################
    #scenarios = ['a2', 'a1b']
    scenario = 'a2'
    
    
    
    
    #for dataset in data:
    #file_loc = dir+'\\'+scenario+'\\'+dataset[0]
    file_loc = dir+'\\'+set
    if not os.path.exists(file_loc):
        os.mkdir(file_loc)
    os.chdir(file_loc)
    #print "The current scenario is: " +scenario + "dataset being worked on is: " + dataset[0]
    print "The current scenario is: " +scenario + "dataset being worked on is: " + set

    #The url of each dataset
    dataSet = vt_data[1]
    #dataSet = dataset[1]

    # The precipitation and temperature variables of each dataset:
    # everything from index 5 to the end of the dataset list
    dataType = vt_data[5:]
    #dataType = dataset[5:]
        
    # daily or monthly for additional aggregation/formatting (not appended yet)
    timeStep = vt_data[2]
    length = timeStep
    #timeStep = dataset[2]

    #Start date
    timeBegin = vt_data[3]
    #timeBegin = dataset[3]
    #End date
    timeEnd = vt_data[4]
    #timeEnd = dataset[4]

    # data processing arguments
    gmlIDs=None
    verbose=True
    # coverage flag: 'true' if the region touches the US border/ocean
    coverage = Region_return[2]
    delim='COMMA'
    stats='MEAN'

    # run the pyGDP
    start = time.time()
    outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSet, dataType, timeBegin, timeEnd, user_attribute, user_value, gmlIDs, verbose, coverage, delim, stats)
    end = time.time()
    print 'Start time is: ' + str(start)
    print 'End time is: ' + str(end)
    print 'Total time was: ' + str(end-start)
    print outputPath
    # copy the output and rename it
    
#     ind = 5
#     for var in range(5, len(dataset)):
#         line = dataset[var].split('-')
#         dataset[ind] = line[4]
#         ind += 1
#         
#     dataType = dataset[5:]
    
    #shutil.copy2(outputPath, region+'_'+dataset[0]+'.csv')
    shutil.copy2(outputPath, region+'_'+vt_data[0]+'.csv')
    
#     csvfile = os.getcwd()+region+'_'+dataset[0]+'.csv'       
#     #Parse the csv file 
#     index = 0            
#     
#     csvread = csv.reader(open(region+'_'+dataset[0] + '.csv', "rb")) 
#     
#     csvwrite = csv.writer(open(dataType[0] + '.csv', "wb"))
#         
#     temp = csvread
#     var = temp.next()
#     var[0] = '#'+dataType[0]
#     #Gets gage ids
#     gage = temp.next()
#     
#     #Writes current variable to csv file
#     csvwrite.writerow(var)
#     #Writes all gage ids to csv file
#     csvwrite.writerow(gage)
#     
#     for variable in dataType:                
#             
#         for row in csvread:
#             #if on last variable     
#             if variable == dataType[len(dataType) - 1]: 
#                 csvwrite.writerow(row)               
#             else:  
#                 if (row[0] in '#'+dataType[index+1]) or (row[0] in '# '+dataType[index+1]):
#                     #Line 33 is used for titling the csv file the name of the variable (like tmin, ppt, or tmax)
#                     var = '#'+dataType[index+1]
#                     parsedFiles.append(os.getcwd()+'\\'+variable+'.csv')
#                     csvwrite = csv.writer(open(dataType[index+1] + '.csv', "wb"))
#                     row[1:] = ""
#                     row[0] = var
#                     csvwrite.writerow(row)
#                     csvwrite.writerow(gage)
#                     if len(dataType) == 2:
#                         csvread.next()
#                     else:
#                         csvread.next()
#                         csvwrite.writerow(csvread.next())
#                         csvwrite.writerow(csvread.next())
#                     break
#                 else:
#                     if dataType[index+1] not in row[0] and row[0] not in dataType[index+1]:
#                         csvwrite.writerow(row)
#         print "Finished parsing " + variable + ".csv"
#         parsedFiles.append(os.getcwd()+'\\'+variable+'.csv')
#         # use index to keep track of next variable
#         if (index + 1) < len(dataType):
#             index += 1
        
    timeend = time.time()
    print 'Start time of pyGDP: ' + str(timestart)
    print 'End time of pyGDP: ' + str(timeend)
    print 'Total time of pyGDP: ' + str(timeend-timestart)
    

    os.chdir(dir)
Example #7
# Assumed setup for this excerpt (the original snippet starts mid-script;
# shapefile, attribute and value are defined earlier in it):
import os
import numpy
import pyGDP
import matplotlib.dates as mdates
pyGDP = pyGDP.pyGDPwebProcessing()
# This dataset URI will be available here: http://cida.usgs.gov/thredds/
# At the time of creation, this dataset was internal USGS only.
datasetURI = 'https://cida.usgs.gov/thredds/dodsC/ssebopeta/monthly'

# Note that pyGDP offers convenience functions to determine what these options are.
# These were derived from the thredds server housing the data.
dataType = 'et'
timeStart = '2000-01-01T00:00:00.000Z'
timeEnd = '2010-01-01T00:00:00.000Z'
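
# As the note above says, these options can be discovered with pyGDP's
# convenience functions (a sketch reusing the session and datasetURI above):
for dt in pyGDP.getDataType(datasetURI):
    print dt
for t in pyGDP.getTimeRange(datasetURI, dataType):
    print t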

outputFileName = 'ETAuSable2000-2010.csv'

outputFile_handle = pyGDP.submitFeatureWeightedGridStatistics(shapefile,
                                                              datasetURI,
                                                              dataType,
                                                              timeStart,
                                                              timeEnd,
                                                              attribute,
                                                              value,
                                                              verbose=True)
os.rename(outputFile_handle, outputFileName)

dates = []
vals = []
datesT, valsT = numpy.loadtxt(
    outputFileName,
    unpack=True,
    skiprows=3,
    delimiter=',',
    converters={0: mdates.strpdate2num("%Y-%m-%dT%H:%M:%SZ")})

for v in valsT:
    vals.append(v)

Example #8
import pyGDP
import matplotlib.dates as mdates
import numpy as np

pyGDP = pyGDP.pyGDPwebProcessing()

"""
This example shows how easy it is to make a call, if all
inputs are known before hand.
"""

shapefile = 'sample:simplified_HUC8s'
user_attribute = 'REGION'
user_value = 'Great Lakes Region'
dataSet = 'dods://cida.usgs.gov/thredds/dodsC/gmo/GMO_w_meta.ncml'
dataType = 'Prcp'
timeBegin = '1970-01-23T00:00:00.000Z'
timeEnd = '1979-01-23T00:00:00.000Z'


print 'Processing request.'
outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSet, dataType, timeBegin, timeEnd, user_attribute, user_value)
        
jd, precip = np.loadtxt(outputPath, unpack=True, skiprows=3, delimiter=',',
                        converters={0: mdates.strpdate2num('%Y-%m-%dT%H:%M:%SZ')})

print precip[0:100]
Example #9
# Assumed imports for this excerpt (used below but not shown in the original):
import pyGDP
import matplotlib.dates as mdates
import numpy as np

pyGDP = pyGDP.pyGDPwebProcessing()
"""
This example shows how easy it is to make a call, if all
inputs are known before hand.
"""

shapefile = 'sample:simplified_HUC8s'
user_attribute = 'REGION'
user_value = 'Great Lakes Region'
dataSet = 'dods://cida.usgs.gov/thredds/dodsC/gmo/GMO_w_meta.ncml'
dataType = 'Prcp'
timeBegin = '1970-01-23T00:00:00.000Z'
timeEnd = '1979-01-23T00:00:00.000Z'

print 'Processing request.'
outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSet,
                                                       dataType, timeBegin,
                                                       timeEnd, user_attribute,
                                                       user_value)

jd, precip = np.loadtxt(
    outputPath,
    unpack=True,
    skiprows=3,
    delimiter=',',
    converters={0: mdates.strpdate2num('%Y-%m-%dT%H:%M:%SZ')})

print precip[0:100]
Example #10
# Assumed setup for this excerpt (shapefile and usr_attribute are defined
# earlier in the original script):
import pprint
import pyGDP
pyGDP = pyGDP.pyGDPwebProcessing()
# as an input instead of value.
michGMLID = pyGDP.getGMLIDs(shapefile, usr_attribute, 'Michigan')
gmlIDs = michGMLID

# We get the dataset URI that we are interested in by searching for prism:
dataSetURIs = pyGDP.getDataSetURI(anyText='prism')
pp = pprint.PrettyPrinter(indent=5, width=60)
pp.pprint(dataSetURIs)

# Set our datasetURI
dataSetURI = 'dods://cida.usgs.gov/thredds/dodsC/prism'
# Get the available data types associated with the dataset
datatypes = pyGDP.getDataType(dataSetURI)
for dt in datatypes:
    print(dt)

# Set the dataType. Note that leaving dataType out below will select all.
dataType = 'ppt'
# Get available time range on the dataset
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print(t)

# Instead of submitting in a value, we submit a list of gmlIDs associated
# with either a small portion of that value, or multiple values.

value = None
path = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, dataType, timeRange[0], timeRange[0],
                                                 usr_attribute, value, gmlIDs, verbose=True)
print(path)
Example #11
# Assumed setup for this excerpt (OKshapefile and usr_attribute are defined
# earlier in the original script):
import pprint
import pyGDP
import numpy as np
import matplotlib.dates as mdates
pyGDP = pyGDP.pyGDPwebProcessing()

# We set our value to 5
usr_value = 5

# our shapefile = 'upload:OKCNTYD' (used as OKshapefile below), usr_attribute = 'OBJECTID', and usr_value = 5
# We get the dataset URI that we are interested in
dataSetURIs = pyGDP.getDataSetURI(anyText='prism')
pp = pprint.PrettyPrinter(indent=5,width=60)
pp.pprint(dataSetURIs)
print

# Set our datasetURI
dataSetURI = 'dods://cida.usgs.gov/thredds/dodsC/prism'
# Get the available data types associated with the dataset
dataType = 'ppt'
# Get available time range on the dataset
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print t
timeBegin = '1900-01-01T00:00:00.000Z'
timeEnd = '1901-01-01T00:00:00.000Z'
print


textFile = pyGDP.submitFeatureWeightedGridStatistics(OKshapefile, dataSetURI, dataType, timeBegin, timeEnd, usr_attribute, usr_value, verbose=True)

jd, precip = np.loadtxt(textFile, unpack=True, skiprows=3, delimiter=',',
                        converters={0: mdates.strpdate2num('%Y-%m-%dT%H:%M:%SZ')})

print 'Some data:'
print precip[0:100]
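
# A quick look at the parsed series (a sketch; assumes matplotlib is installed
# alongside the np and mdates imports already used above):
import matplotlib.pyplot as plt
plt.plot_date(jd, precip, '-')
plt.ylabel('precipitation')
plt.savefig('precip_okcntyd.png')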
Example #12
def main_func():
    
    def Region_lookup(region):
        region_properties = []
        if region == 'nhru':
            region_properties.append(371)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/51d35da1e4b0ca184833940c')
            region_properties.append('false')
        if region == 'R01':
            region_properties.append(2462)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5244735ae4b05b217bada04e')
            region_properties.append('false')
        if region == 'R02':
            region_properties.append(4827)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52696784e4b0584cbe9168ee')
            region_properties.append('false')
        if region == 'R03':
            region_properties.append(9899)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5283bd23e4b047efbbb57922')
            region_properties.append('false')
        if region == 'R04':
            region_properties.append(5936)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5284ff57e4b063f258e61b9d')
            region_properties.append('false')
        if region == 'R05':
            region_properties.append(7182)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/528516bbe4b063f258e62161')
            region_properties.append('false')
        if region == 'R06':
            region_properties.append(2303)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/51d75296e4b055e0afd5be2c')
            region_properties.append('true')
        if region == 'R07':
            region_properties.append(8205)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52851cd5e4b063f258e643dd')
            region_properties.append('true')
        if region == 'R08':
            region_properties.append(4449)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52854695e4b063f258e6513c')
            region_properties.append('true')
        if region == 'R10L':
            region_properties.append(8603)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/520031dee4b0ad2d97189db2')
            region_properties.append('true')
        elif region == 'R10U':
            region_properties.append(10299)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5216849ee4b0b45d6ba61e2e')
            region_properties.append('false')
        elif region == 'R11':
            region_properties.append(7373)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/51d1f9ebe4b08b18a62d586b')
            region_properties.append('true')
        elif region == 'R12':
            region_properties.append(7815)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5201328ae4b009d47a4c247a')
            region_properties.append('false')
        elif region == 'R13':
            region_properties.append(1958)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/51d752b9e4b055e0afd5be36')
            region_properties.append('false')
        elif region == 'R14':
            region_properties.append(3879)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52029c68e4b0e21cafa4b40c')
            region_properties.append('false')
        elif region == 'R15':
            region_properties.append(3441)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/5285389ae4b063f258e64863')
            region_properties.append('false')
        elif region == 'R16':
            region_properties.append(2664)
            region_properties.append('https://www.sciencebase.gov/catalogMaps/mapping/ows/52853f97e4b063f258e64875')
            region_properties.append('false')

        return region_properties
    
    global parsedFiles, csvfile, vistrails_data_set, nhru, length
    import pyGDP  # local import so pyGDP can be rebound to a session object below
    
    regions = ['R06', 'R10L', 'R11', 'R12', 'R13', 'R14']
    region = 'R06'
    parsedFiles = []
    csvfile = ''
    vistrails_data_set = []
    
    Region_return=Region_lookup(region)
    hrus = Region_return[0]
    nhru = hrus
    ScienceBase_URL= Region_return[1]
    
    pyGDP.WFS_URL = ScienceBase_URL
    
    # call web processing module
    
    pyGDP = pyGDP.pyGDPwebProcessing()
    
    # change working directory so the GDP output will be written there
    
    # Datasets and their properties
    # run 1, 2 , and 3
    
    #**********************************
    # run 1 only
    #scenario = 'a2'
    # other scenarios are 'a1b' and 'b1'
    scenarios = ['a2','a1b'] # not running b1 or a1fi
    
    timestart = '1960-01-15T00:00:00.000Z'
    timeend = '2010-12-15T00:00:00.000Z'
    
    #     timestart = '2010-12-15T00:00:00.000Z'
    #     timeend='2099-12-15T00:00:00.000Z'
    
    #**********************************
    # datasets greyed out - we are not initially using.
    ##ccsm = ['ccsm'+scenarios[0], 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
    ##    'ccsm-'+scenarios[0]+'-pr-NAm-grid',
    ##    'ccsm-'+scenarios[0]+'-tmax-NAm-grid',
    ##    'ccsm-'+scenarios[0]+'-tmin-NAm-grid']
    
    
    
    
    shapefiles = pyGDP.getShapefiles()
    for shp in shapefiles:
        print shp
    
    # feature loaded from sciencebase
    #should shapefile be sb:SP_hru instead?
    shapefile = 'sb:'+region+'_hru'
    user_attribute = 'hru_id_loc'
    
    user_value = None
    
    os.chdir('C:\\Users\\reimandy\\workspace\\userpackages\\WaterBalanceModel\\Step1_CLIMATE_DATA\\'+region+'\\HAYHOE')
    dir = os.getcwd()
    
    for scenario in scenarios:
    
        cgcm3_t47 = ['cgcm3_t47'+str(scenario), 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
                'cgcm3_t47-'+str(scenario)+'-pr-NAm-grid',
                'cgcm3_t47-'+str(scenario)+'-tmax-NAm-grid',
                'cgcm3_t47-'+str(scenario)+'-tmin-NAm-grid']
        
        cgcm3_t63 = ['cgcm3_t63'+str(scenario), 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
                'cgcm3_t63-'+str(scenario)+'-pr-NAm-grid',
                'cgcm3_t63-'+str(scenario)+'-tmax-NAm-grid',
                'cgcm3_t63-'+str(scenario)+'-tmin-NAm-grid']
        
        cnrm = ['cnrm'+str(scenario), 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
                'cnrm-'+str(scenario)+'-pr-NAm-grid',
                'cnrm-'+str(scenario)+'-tmax-NAm-grid',
                'cnrm-'+str(scenario)+'-tmin-NAm-grid']
        
        ##csiro = ['csiro'+scenarios[0], 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
        ##        'csiro-'+scenarios[0]+'-pr-NAm-grid',
        ##        'csiro-'+scenarios[0]+'-tmax-NAm-grid',
        ##        'csiro-'+scenarios[0]+'-tmin-NAm-grid']
        
        ##echam5 = ['echam5'+scenarios[0], 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
        ##        'echam5-'+scenarios[0]+'-pr-NAm-grid',
        ##        'echam5-'+scenarios[0]+'-tmax-NAm-grid',
        ##        'echam5-'+scenarios[0]+'-tmin-NAm-grid']
        
        echo = ['echo'+str(scenario), 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
                'echo-'+str(scenario)+'-pr-NAm-grid',
                'echo-'+str(scenario)+'-tmax-NAm-grid',
                'echo-'+str(scenario)+'-tmin-NAm-grid']
        
        gfdl_2_1 = ['gfdl_2-1'+str(scenario), 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
                'gfdl_2-1-'+str(scenario)+'-pr-NAm-grid',
                'gfdl_2-1-'+str(scenario)+'-tmax-NAm-grid',
                'gfdl_2-1-'+str(scenario)+'-tmin-NAm-grid']
        
        ##giss_aom = ['giss_aom'+scenarios[0], 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
        ##        'giss_aom-'+scenarios[0]+'-pr-NAm-grid',
        ##        'giss_aom-'+scenarios[0]+'-tmax-NAm-grid',
        ##        'giss_aom-'+scenarios[0]+'-tmin-NAm-grid']
        
        ##hadcm3 = ['hadcm3'+scenarios[0], 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
        ##        'hadcm3-'+scenarios[0]+'-pr-NAm-grid',
        ##        'hadcm3-'+scenarios[0]+'-tmax-NAm-grid',
        ##        'hadcm3-'+scenarios[0]+'-tmin-NAm-grid']
        
        ##miroc_hi = ['miroc_hi'+scenarios[0], 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
        ##        'miroc_hi-'+scenarios[0]+'-pr-NAm-grid',
        ##        'miroc_hi-'+scenarios[0]+'-tmax-NAm-grid',
        ##        'miroc_hi-'+scenarios[0]+'-tmin-NAm-grid']
        
        miroc_med = ['miroc_med'+str(scenario), 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
                'miroc_med-'+str(scenario)+'-pr-NAm-grid',
                'miroc_med-'+str(scenario)+'-tmax-NAm-grid',
                'miroc_med-'+str(scenario)+'-tmin-NAm-grid']
        
        mri_cgcm2 = ['mri_cgcm2'+str(scenario), 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
                'mri_cgcm2-'+str(scenario)+'-pr-NAm-grid',
                'mri_cgcm2-'+str(scenario)+'-tmax-NAm-grid',
                'mri_cgcm2-'+str(scenario)+'-tmin-NAm-grid']
        
        ##pcm = ['pcm'+scenarios[0], 'http://cida.usgs.gov/thredds/dodsC/dcp/conus', 'Daily', timestart, timeend,\
        ##        'pcm-'+scenarios[0]+'-pr-NAm-grid',
        ##        'pcm-'+scenarios[0]+'-tmax-NAm-grid',
        ##        'pcm-'+scenarios[0]+'-tmin-NAm-grid']
        # get list of shapefiles uploaded to the GDP
    
    
    
        if scenario == 'a1b':
            #data=[cgcm3_t47,cgcm3_t63,cnrm,echam5,echo,giss_aom,hadcm3,miroc_hi,pcm]
            data=[cgcm3_t47,cgcm3_t63,cnrm,echo]
        elif scenario == 'a2':
            #data=[ccsm,cgcm3_t47,cgcm3_t63,cnrm,csiro,echam5,echo,gfdl_2_1,hadcm3,miroc_med,mri_cgcm2,pcm]
            data = [cgcm3_t47,cgcm3_t63,cnrm,echo,gfdl_2_1,miroc_med,mri_cgcm2]
    
        for dataset in data:
            file_loc = dir+'\\'+scenario+'\\'+dataset[0]
            if not os.path.exists(file_loc):
                os.mkdir(file_loc)
            os.chdir(file_loc)
            print "The current dataset being worked on is: " + dataset[0]
    
    
            #The url of each dataset
            dataSet = dataset[1]
    
            # The precipitation and temperature variables of each dataset:
            # everything from index 5 to the end of the dataset list
            dataType = dataset[5:]
    
            # daily or monthly for additional aggregation/formatting (not appended yet)
            timeStep = dataset[2]
    
            #Start date
            timeBegin = dataset[3]
            #End date
            timeEnd = dataset[4]
    
            # data processing arguments
            gmlIDs=None
            verbose=True
            # coverage flag: 'true' if the region touches the US border/ocean
            coverage = Region_return[2]
            delim='COMMA'
            stats='MEAN'
    
            # run the pyGDP
            start = time.time()
            outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSet, dataType, timeBegin, timeEnd, user_attribute, user_value, gmlIDs, verbose, coverage, delim, stats)
            end = time.time()
            print 'Start time is: ' + str(start)
            print 'End time is: ' + str(end)
            print 'Total time was: ' + str(end-start)
            print outputPath
            # copy the output and rename it
            shutil.copy2(outputPath, region+'_'+dataset[0]+'.csv')
    
            ind = 5
            for var in range(5, len(dataset)):
                line = dataset[var].split('-')
                dataset[ind] = line[2]
                ind += 1
    
            dataType = dataset[5:]
    
            #Parse the csv file
            index = 0
            #parsedFiles = []
    
    
            csvread = csv.reader(open(region+'_'+dataset[0] + '.csv', "rb"))
    
            csvwrite = csv.writer(open(dataType[0] + '.csv', "wb"))
            #parsedFiles.append(csvwrite)
    
            temp = csvread
            var = temp.next()
            var[0] = '#'+dataType[0]
            #Gets gage ids
            gage = temp.next()
    
            #Writes current variable to csv file
            csvwrite.writerow(var)
            #Writes all gage ids to csv file
            csvwrite.writerow(gage)
    
            for variable in dataType:
    
                for row in csvread:
                    #if on last variable
                    if variable == dataType[len(dataType) - 1]:
                        csvwrite.writerow(row)
                    else:
                        if (row[0] in '#'+dataType[index+1]) or (row[0] in '# '+dataType[index+1]):
                            #Line 33 is used for titling the csv file the name of the variable (like tmin, ppt, or tmax)
                            var = '#'+dataType[index+1]
                            csvwrite = csv.writer(open(dataType[index+1] + '.csv', "wb"))
                            #parsedFiles.append(csvwrite)
                            row[1:] = ""
                            row[0] = var
                            csvwrite.writerow(row)
                            csvwrite.writerow(gage)
    
                            if len(dataType) == 2:
                                csvread.next()
                            else:
                                temp = csvread
                                csvwrite.writerow(csvread.next())
                                csvwrite.writerow(temp.next())
                            break
                        else:
                            if dataType[index+1] not in row[0] and row[0] not in dataType[index+1]:
                                csvwrite.writerow(row)
                print "Finished parsing " + variable + ".csv"
                # use index to keep track of next variable
                if (index + 1) < len(dataType):
                    index += 1
                #pyGDP.setCSV_Parsed_Files(parsedFiles)
            os.chdir(dir)
Example #13
def main_func(curdir, data_set, region):
    def Region_lookup(region):
        region_properties = []

        if region == "nhru":
            region_properties.append(371)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/51d35da1e4b0ca184833940c")
            region_properties.append("false")
        elif region == "R01":
            region_properties.append(2462)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/5244735ae4b05b217bada04e")
            region_properties.append("false")
        elif region == "R02":
            region_properties.append(4827)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/52696784e4b0584cbe9168ee")
            region_properties.append("false")
        elif region == "R03":
            region_properties.append(9899)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/5283bd23e4b047efbbb57922")
            region_properties.append("false")
        elif region == "R04":
            region_properties.append(5936)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/5284ff57e4b063f258e61b9d")
            region_properties.append("false")
        elif region == "R05":
            region_properties.append(7182)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/528516bbe4b063f258e62161")
            region_properties.append("false")
        elif region == "R06":
            region_properties.append(2303)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/51d75296e4b055e0afd5be2c")
            region_properties.append("true")
        elif region == "R07":
            region_properties.append(8205)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/52851cd5e4b063f258e643dd")
            region_properties.append("true")
        elif region == "R08":
            region_properties.append(4449)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/52854695e4b063f258e6513c")
            region_properties.append("true")
        elif region == "R10L":
            region_properties.append(8603)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/520031dee4b0ad2d97189db2")
            region_properties.append("true")
        elif region == "R10U":
            region_properties.append(10299)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/5216849ee4b0b45d6ba61e2e")
            region_properties.append("false")
        elif region == "R11":
            region_properties.append(7373)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/51d1f9ebe4b08b18a62d586b")
            region_properties.append("true")
        elif region == "R12":
            region_properties.append(7815)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/5201328ae4b009d47a4c247a")
            region_properties.append("false")
        elif region == "R13":
            region_properties.append(1958)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/51d752b9e4b055e0afd5be36")
            region_properties.append("false")
        elif region == "R14":
            region_properties.append(3879)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/52029c68e4b0e21cafa4b40c")
            region_properties.append("false")
        elif region == "R15":
            region_properties.append(3441)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/5285389ae4b063f258e64863")
            region_properties.append("false")
        elif region == "R16":
            region_properties.append(2664)
            region_properties.append("https://www.sciencebase.gov/catalogMaps/mapping/ows/52853f97e4b063f258e64875")
            region_properties.append("false")

        return region_properties
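
    # The chain above is just a lookup table; an equivalent, more compact form
    # would be a dict keyed on region (a sketch, not part of the original script):
    #
    #     REGIONS = {
    #         "nhru": (371, "https://www.sciencebase.gov/catalogMaps/mapping/ows/51d35da1e4b0ca184833940c", "false"),
    #         "R06": (2303, "https://www.sciencebase.gov/catalogMaps/mapping/ows/51d75296e4b055e0afd5be2c", "true"),
    #         # ... remaining regions as above ...
    #     }
    #
    #     def Region_lookup(region):
    #         return list(REGIONS[region])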

    def list_define(data_set):
        if data_set == "PRISM":
            return [
                "PRISM",
                "http://cida.usgs.gov/thredds/dodsC/prism",
                "Monthly",
                "2010-01-01T00:00:00.000Z",
                "2012-12-31T00:00:00.000Z",
                "ppt",
                "tmx",
                "tmn",
            ]
        elif data_set == "MPI_ECHAM5":
            return [
                "MPI ECHAM5",
                "http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/EH5/merged/Monthly/RegCM3_Monthly_merged_EH5.ncml",
                "Monthly",
                "1968-01-01T00:00:00.000Z",
                "2099-12-31T00:00:00.000Z",
                "RT",
                "TA",
            ]
        elif data_set == "GENMON":
            return [
                "GENMON",
                "http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/GENMOM/merged/Monthly/RegCM3_Monthly_merged_GENMOM.ncml",
                "Monthly",
                "1980-01-01T00:00:00.000Z",
                "2089-12-31T00:00:00.000Z",
                "RT",
                "TA",
            ]
        elif data_set == "GFDL_CM_2_0":
            return [
                "GFDL CM 2.0",
                "http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/GFDL/merged/Monthly/RegCM3_Monthly_merged_GFDL.ncml",
                "Monthly",
                "1970-01-01T00:00:00.000Z",
                "2069-12-31T00:00:00.000Z",
                "RT",
                "TA",
            ]
        elif data_set == "NOAA_NCEP":
            return [
                "NOAA NCEP",
                "http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/NCEP/merged/Monthly/RegCM3_Monthly_merged_NCEP.ncml",
                "Monthly",
                "1982-01-01T00:00:00.000Z",
                "2007-12-31T00:00:00.000Z",
                "RT",
                "TA",
            ]
        elif data_set == "GSD":
            return [
                "Gridded Observed Data(1949-2010)",
                "http://cida.usgs.gov/thredds/dodsC/new_gmo",
                "Daily",
                "1949-01-01T00:00:00.000Z",
                "2010-12-31T00:00:00.000Z",
                "pr",
                "tas",
            ]
        elif data_set == "DAYMET":
            return [
                "DAYMET",
                "dods://cida-eros-mows1.er.usgs.gov:8080/thredds/dodsC/daymet",
                "Daily",
                "2010-01-01T00:00:00.000Z",
                "2012-01-01T00:00:00.000Z",
                "prcp",
                "tmax",
                "tmin",
            ]
        elif data_set == "Idaho":
            return [
                "Idaho",
                "http://cida.usgs.gov/thredds/dodsC/UofIMETDATA",
                "Daily",
                "1979-01-01T00:00:00.000Z",
                "2013-01-01T00:00:00.000Z",
                "precipitation_amount",
                "min_air_temperature",
                "max_air_temperature",
            ]
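
    # Each list packs [name, dataset URI, timestep, start, end, variables...],
    # mirroring the vt_data slicing below; unpacking one entry would look like
    # this sketch (hypothetical, for illustration):
    #
    #     name, uri, step, t0, t1 = list_define("PRISM")[:5]
    #     variables = list_define("PRISM")[5:]   # ['ppt', 'tmx', 'tmn']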

    global csvfile, vt_dataset, nhru, length, vt_datatype, url
    import pyGDP

    Region_return = Region_lookup(region)
    hrus = Region_return[0]
    nhru = hrus
    ScienceBase_URL = Region_return[1]

    # NHDplus region

    # link to region 6 hrus on ScienceBase

    pyGDP.WFS_URL = ScienceBase_URL
    url = pyGDP.WFS_URL
    pyGDP = pyGDP.pyGDPwebProcessing()

    # change working directory so the GDP output will be written there

    # Datasets and their properties
    # **********************************
    # Gridded_Observed_Data_1950_1999 = ['Gridded Observed Data(1950-1999)', 'dods://cida.usgs.gov/thredds/dodsC/gmo/GMO_w_meta.ncml', 'Daily', '1950-01-01T00:00:00.000Z', '1999-12-31T00:00:00.000Z', 'Prcp', 'Tavg']

    # data = [PRISM]#,MPI_ECHAM5,GENMON,GFDL_CM_2_0,NOAA_NCEP,GSD,DAYMET]
    # prism starts at 1895

    # get list of shapefiles uploaded to the GDP
    shapefiles = pyGDP.getShapefiles()
    for shp in shapefiles:
        print shp

    # feature loaded from sciencebase
    # shapefile = 'sb:'+region+'_hru'
    shapefile = "sb:nhru"
    user_attribute = "hru_id_loc"
    user_value = None

    # os.chdir('C:\\Users\\reimandy\\workspace\\userpackages\\WaterBalanceModel\\Step1_CLIMATE_DATA\\'+region+'\\SBDDS')
    os.chdir(curdir)
    dir = os.getcwd()
    vt_data = list_define(data_set)
    vt_datatype = vt_data[5:]

    timestart = time.time()
    file_loc = curdir + "\\" + data_set
    if not os.path.exists(file_loc):
        os.mkdir(file_loc)
    os.chdir(file_loc)
    print "The current dataset being worked on is: " + data_set

    # The url of each dataset
    dataSet = vt_data[1]

    # The precipitation and temperature variables of each dataset:
    # everything from index 5 to the end of the dataset list
    dataType = vt_data[5:]

    # daily or monthly for additional aggregation/formatting (not appended yet)
    timeStep = vt_data[2]
    # Length is for connecting to vistrails
    length = timeStep

    # Start date
    timeBegin = vt_data[3]
    # End date
    timeEnd = vt_data[4]

    # data processing arguments
    gmlIDs = None
    verbose = True
    # coverage flag: 'true' if the region touches the US border/ocean

    coverage = Region_return[2]
    delim = "COMMA"
    stats = "MEAN"

    # run the pyGDP
    start = time.time()
    outputPath = pyGDP.submitFeatureWeightedGridStatistics(
        shapefile,
        dataSet,
        dataType,
        timeBegin,
        timeEnd,
        user_attribute,
        user_value,
        gmlIDs,
        verbose,
        coverage,
        delim,
        stats,
    )
    end = time.time()
    print "Start time is: " + str(start)
    print "End time is: " + str(end)
    print "Total time was: " + str(end - start)
    print outputPath
    # copy the output and rename it
    shutil.copy2(outputPath, region + "_" + vt_data[0] + ".csv")

    #         #Parse the csv file
    #         index = 0
    #
    #         csvread = csv.reader(open(region+'_'+dataset[0] + '.csv', "rb"))
    #         csvwrite = csv.writer(open(dataType[0] + '.csv', "wb"))
    #
    #         index = 0
    #
    #         temp = csvread
    #         var = temp.next()
    #         #Gets gage ids
    #         gage = temp.next()
    #
    #         #Writes current variable to csv file
    #         csvwrite.writerow(var)
    #         #Writes all gage ids to csv file
    #         csvwrite.writerow(gage)
    #
    #         for variable in dataType:
    #
    #             for row in csvread:
    #                 #if on last variable
    #                 if variable == dataType[len(dataType) - 1]:
    #                     csvwrite.writerow(row)
    #                 else:
    #                     if (row[0] in '#'+dataType[index+1]) or (row[0] in '# '+dataType[index+1]):
    #                         #Line 33 is used for titling the csv file the name of the variable (like tmin, ppt, or tmax)
    #                         var = '#'+dataType[index+1]
    #                         csvwrite = csv.writer(open(dataType[index+1] + '.csv', "wb"))
    #                         row[1:] = ""
    #                         row[0] = var
    #                         csvwrite.writerow(row)
    #                         csvwrite.writerow(gage)
    #
    #                         if len(dataType) == 2:
    #                             csvread.next()
    #                         else:
    #                             csvread.next()
    #                             csvwrite.writerow(csvread.next())
    #                             csvwrite.writerow(csvread.next())
    #                         break
    #                     else:
    #                         if dataType[index+1] not in row[0] and row[0] not in dataType[index+1]:
    #                             csvwrite.writerow(row)
    #             print "Finished parsing " + variable + ".csv"
    #             parsedFiles.append(os.getcwd()+'\\'+variable+'.csv')
    #             # use index to keep track of next variable
    #             if (index + 1) < len(dataType):
    #                 index += 1

    timeend = time.time()
    print "Start time of pyGDP: " + str(timestart)
    print "End time of pyGDP: " + str(timeend)
    print "Total time of pyGDP: " + str(timeend - timestart)

    os.chdir(dir)
Example #14
def main_func(region):
    # NHDplus region (note: this hard-coded value overrides the region argument)
    region = 'R06'
    #region = pyGDP.getRegion()
    # change working directory so the GDP output will be written there
        
    # Datasets and their properties
    #**********************************
    Gridded_Observed_Data_1950_1999 = ['Gridded Observed Data(1950-1999)', 'dods://cida.usgs.gov/thredds/dodsC/gmo/GMO_w_meta.ncml', 'Daily', '1950-01-01T00:00:00.000Z', '1999-12-31T00:00:00.000Z', 'Prcp', 'Tavg']
    Gridded_Observed_Data_1949_2010 = ['Gridded Observed Data(1949-2010)', 'http://cida.usgs.gov/thredds/dodsC/new_gmo', 'Daily', '1950-01-01T00:00:00.000Z', '1950-02-01T00:00:00.000Z', 'pr', 'tas']
    PRISM = ['PRISM', 'http://cida.usgs.gov/thredds/dodsC/prism', 'Monthly', '1895-01-01T00:00:00.000Z', '2012-12-31T00:00:00.000Z', 'ppt', 'tmx', 'tmn']
    DAYMET = ['DAYMET', 'dods://cida-eros-mows1.er.usgs.gov:8080/thredds/dodsC/daymet', 'Daily', '1980-01-01T00:00:00.000Z', '2012-01-01T00:00:00.000Z', 'prcp', 'tmax', 'tmin']
    MPI_ECHAM5 = ['MPI ECHAM5', 'http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/EH5/merged/Monthly/RegCM3_Monthly_merged_EH5.ncml', 'Monthly', '1968-01-01T00:00:00.000Z', '2099-12-31T00:00:00.000Z', 'RT', 'TA']
    GENMON = ['GENMON', 'http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/GENMOM/merged/Monthly/RegCM3_Monthly_merged_GENMOM.ncml', 'Monthly', '1980-01-01T00:00:00.000Z', '2089-12-31T00:00:00.000Z', 'RT', 'TA']
    GFDL_CM_2_0 = ['GFDL CM 2.0', 'http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/GFDL/merged/Monthly/RegCM3_Monthly_merged_GFDL.ncml', 'Monthly', '1980-01-01T00:00:00.000Z', '2069-12-31T00:00:00.000Z', 'RT', 'TA']
    NOAA_NCEP = ['NOAA NCEP', 'http://regclim.coas.oregonstate.edu:8080/thredds/dodsC/regcmdata/NCEP/merged/Monthly/RegCM3_Monthly_merged_NCEP.ncml','Monthly', '1982-01-01T00:00:00.000Z', '2007-12-31T00:00:00.000Z','RT','TA']
    
    # List of datasets to process
    data = [Gridded_Observed_Data_1950_1999, Gridded_Observed_Data_1949_2010, PRISM, DAYMET, MPI_ECHAM5, GENMON, GFDL_CM_2_0, NOAA_NCEP]
    #data=[Gridded_Observed_Data_1950_1999]
    
    # link to region 6 hrus on ScienceBase
    # put a function here to retrieve sciencebase url and number of hrus for specific region
    pyGDP.WFS_URL = 'https://www.sciencebase.gov/catalogMaps/mapping/ows/51b0d374e4b030b519830d73'
    
    #Get pyGDP.WFS_URL from vistrials
    #pyGDP.WFS_URL = pyGDP.get_pyGDP_WFS_URL()
    
    # region 6 = 2303 hrus
    # for more info read:
    # science base access:  https://my.usgs.gov/confluence/display/GeoDataPortal/pyGDP+FAQ
    
    # call web processing module
    # will automatically use shapefiles from GDP as default unless
    # you define web service
    pyGDP = pyGDP.pyGDPwebProcessing()
    
    # get list of shapefiles uploaded to the GDP
    shapefiles = pyGDP.getShapefiles()
    for shp in shapefiles:
        print shp
    
    # feature loaded from sciencebase
    # automate region number
    shapefile = 'sb:R06a_hru'
    
    # shapefile/feature attribute for which you are summarizing info
    user_attribute = 'hru_id_loc'
    
    # create a list of hru_ids that can be used for the user_value
    # need to automate the number of hrus in the shapefile, maybe pull it from
    # the ScienceBase url retrieval above

    
    # single feature id test case
    #user_value = '10'
    # to summarize for all ids
    user_value = None
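    # A minimal sketch (assuming hru ids run 1..n, with n = 2303 for region 6
    # as noted above) of building the id list for a subset run:
    # user_value = [str(i) for i in range(1, 2303 + 1)]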
    
    for dataset in data:
        print "The current dataset being worked on is: " + dataset[0]
    
        #The url of each dataset
        dataSet = dataset[1]
    
        # The precipitation and temperature variable names of each dataset:
        # everything from index 5 to the end of the dataset list
        dataType = dataset[5:]
    
        # daily or monthly, for additional aggregation/formatting (not implemented yet)
        timeStep = dataset[2]
    
        #Start date
        timeBegin = dataset[3]
        #End date
        timeEnd = dataset[4]
    
        # data processing arguments
        gmlIDs=None
        verbose=True
        #coverage = 'false' check if on US border/ocean
        coverage='true'
        delim='COMMA'
        stats='MEAN'
    
        # run the pyGDP
        outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSet, dataType, timeBegin, timeEnd, user_attribute, user_value, gmlIDs, verbose, coverage, delim, stats)
        print outputPath
        #pyGDP.setCSV_file(outputPath)
        # copy the output and rename it
        shutil.copy2(outputPath, region+'_'+dataset[0]+'.csv')
        
        csvfile = os.path.join(os.getcwd(), region+'_'+dataset[0]+'.csv')
        
        #Parse the csv file 
        index = 0
        #parsedFiles = []
            
        csvread = csv.reader(open(region + '_' + dataset[0] + '.csv', "rb"))
        csvwrite = csv.writer(open(dataType[0] + '.csv', "wb"))
        
        
        temp = csvread
        var = temp.next()
        #Gets gage ids
        gage = temp.next()
        
        #Writes current variable to csv file
        csvwrite.writerow(var)
        #Writes all gage ids to csv file
        csvwrite.writerow(gage)
        
        for variable in dataType:                
                
            for row in csvread:
                #if on last variable     
                if variable == dataType[len(dataType) - 1]: 
                    csvwrite.writerow(row)               
                else:  
                    if (row[0] in '#'+dataType[index+1]) or (row[0] in '# '+dataType[index+1]):
                        # title the new csv file with the name of the next variable (like tmin, ppt, or tmax)
                        var = '#'+dataType[index+1]
                        csvwrite = csv.writer(open(dataType[index+1] + '.csv', "wb"))
                        row[1:] = ""
                        row[0] = var
                        csvwrite.writerow(row)
                        csvwrite.writerow(gage)
                        if len(dataType) == 2:
                            csvread.next()
                        else:
                            csvwrite.writerow(csvread.next())
                            csvwrite.writerow(csvread.next())
                        break
                    else:
                        if dataType[index+1] not in row[0] and row[0] not in dataType[index+1]:
                            csvwrite.writerow(row)
            print "Finished parsing " + variable + ".csv"
            parsedFiles.append(os.getcwd()+'\\'+variable+'.csv')
            # use index to keep track of next variable
            if (index + 1) < len(dataType):
                index += 1
          # 'http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/ncml/days_prcp_abv_cmip5_hist_der.ncml',
          # 'http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/ncml/cooling_degree_day_cmip5_hist_der.ncml']

for dataURI in dataURIs:
    remote_dataURI=dataURI.replace('http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/','http://cida-eros-thredds3.er.usgs.gov:8080/thredds/dodsC/cmip5_bcca/derivatives/')
    print(remote_dataURI)
    dataTypes = pyGDP.getDataType(remote_dataURI)
    timeRange = pyGDP.getTimeRange(remote_dataURI, dataTypes[0])
    if len(dataTypes)==1:
        dataTypes=dataTypes[0]
    for shapefile in shapefiles.keys():
        outputfilename=shapefile.replace('derivative:','')+'_'+dataURI.replace('http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/ncml/','')
        outputfilename=outputfilename.replace('http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/averages_rcp/ncml/','')
        outputfilename=outputfilename.replace('http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/averages_hist/ncml/','')
        outputfilename=outputfilename.replace('http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/averages_gmo/ncml/','')
        outputfilename=outputfilename.replace('http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/cmip5_obs_der/','')
        if not os.path.isfile(outputfilename):
            open(outputfilename, 'a').close()
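            # touching an empty file marks this shapefile/dataset combination
            # as submitted, so rerunning the script skips it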
            print shapefile
            print dataURI
            print dataTypes
            print timeRange
            print shapefiles[shapefile]
            outFile=pyGDP.submitFeatureWeightedGridStatistics(geoType=shapefile,
                                                                  dataSetURI=dataURI,
                                                                  varID=dataTypes,
                                                                  startTime=timeRange[0],
                                                                  endTime=timeRange[1],
                                                                  attribute=shapefiles[shapefile],
                                                                  coverage=False,
                                                                  outputfname=outputfilename)
Example #16
value='Au Sable'

### SSEBop Actual Evapotranspiration
# This dataset URI will be available here: http://cida.usgs.gov/thredds/
# At the time of creation, this dataset was internal USGS only.
datasetURI='https://cida.usgs.gov/thredds/dodsC/ssebopeta/monthly'

# Note that pyGDP offers convenience functions to determine what these options are.
# These were derived from the thredds server housing the data.
dataType = 'et'
timeStart = '2000-01-01T00:00:00.000Z'
timeEnd   = '2010-01-01T00:00:00.000Z'

outputFileName = 'ETAuSable2000-2010.csv'

outputFile_handle = pyGDP.submitFeatureWeightedGridStatistics(shapefile, datasetURI, dataType, timeStart, timeEnd, attribute, value,verbose=True)
os.rename(outputFile_handle,outputFileName)

datesT, valsT = numpy.loadtxt(outputFileName, unpack=True, skiprows=3, delimiter=',',
        converters={0: mdates.strpdate2num("%Y-%m-%dT%H:%M:%SZ")})

# copy the numpy arrays into plain lists
dates = list(datesT)
vals = list(valsT)

fig = pylab.figure(figsize=(12,6),facecolor='w')
fig.suptitle('SSEBop Actual Evapotranspiration, Au Sable Watershed',fontsize=26)
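# Hedged completion (the original snippet ends at the figure title): plot the
# series built above; the axis label and units are assumptions for monthly
# SSEBop actual ET.
ax = fig.gca()
ax.set_ylabel('Actual ET (mm/month)')  # assumed units
ax.plot_date(dates, vals, 'b-')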
Example #17
# Set our datasetURI
dataSetURI = 'dods://cida.usgs.gov/thredds/dodsC/prism'
# Choose a data type available on the dataset (see pyGDP.getDataType)
dataType = 'ppt'
# Get available time range on the dataset
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print t
timeBegin = '1900-01-01T00:00:00.000Z'
timeEnd = '1901-01-01T00:00:00.000Z'
print

textFile = pyGDP.submitFeatureWeightedGridStatistics(OKshapefile,
                                                     dataSetURI,
                                                     dataType,
                                                     timeBegin,
                                                     timeEnd,
                                                     usr_attribute,
                                                     usr_value,
                                                     verbose=True)

jd, precip = np.loadtxt(
    textFile,
    unpack=True,
    skiprows=3,
    delimiter=',',
    converters={0: mdates.strpdate2num('%Y-%m-%dT%H:%M:%SZ')})

print 'Some data:'
print precip[0:100]
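
# A small follow-on sketch (not part of the original example): summarize the
# loaded precipitation series with numpy, already imported as np above.
print 'mean precip: %.2f' % np.mean(precip)
print 'max precip: %.2f' % np.max(precip)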
Example #18
outputpath_earlys = list()
outputpath_lates = list()

for datatype in datatypes:
    if 'rcp85' in datatype:
        #if 'tasmax' in datatype and 'rcp85' in datatype:
        print(datatype)
        outName = 'Loca_early_Future_' + datatype + '.csv'
        if os.path.isfile(outName):
            # already processed on a previous run; record the existing file
            outputpath_earlys.append(outName)
        else:
            outputPath = pyGDP.submitFeatureWeightedGridStatistics(
                shapefile,
                dataSetURI,
                datatype,
                '2006-01-01T00:00:00Z',
                '2009-12-31T00:00:00Z',
                polyAttribute,
                polyValue,
                outputfname=outName)
            print(outputPath)
            outputpath_earlys.append(outputPath)
        outName = 'Loca_late_Future_' + datatype + '.csv'
        if os.path.isfile(outName):
            # already processed on a previous run; record the existing file
            outputpath_lates.append(outName)
        else:
            outputPath = pyGDP.submitFeatureWeightedGridStatistics(
                shapefile,
                dataSetURI,
                datatype,
                '2096-01-01T00:00:00Z',
Example #19
# In[9]:

# Set the dataType. Note that leaving dataType out below will select all.
# Get available time range on the dataset
dataType='prcp'
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print t


# In[14]:

# Submit job to pyGDP
# result is area-weighted mean in CSV for given datatype
path = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, dataType, timeRange[0], timeRange[1], usr_attribute, usr_values, gmlIDs=None, verbose=True)
print path
import shutil
shutil.copy2(path, names+'_'+dataType)


# In[20]:

datatype="tmax"
path = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, dataType, timeRange[0], timeRange[1], usr_attribute, usr_values, gmlIDs=None, verbose=True)
print path
import shutil
shutil.copy2(path, names+'_'+dataType)


# In[17]:
# Grab the file and get its attributes:
shapefile = 'sample:CSC_Boundaries'
attributes = pyGDP.getAttributes(shapefile)
for attr in attributes:
    print attr

# Grab the values of 'area_name' from 'sample:CSC_Boundaries'
usr_attribute = 'area_name'
values = pyGDP.getValues(shapefile, usr_attribute)
for v in values:
    print v

usr_value = 'Southwest'

dataSetURI = 'dods://cida.usgs.gov/thredds/dodsC/gmo/GMO_w_meta.ncml'
dataTypes = pyGDP.getDataType(dataSetURI)
for d in dataTypes:
    print d

dataType = 'Prcp'
# Get available time range on the dataset
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print t

timeBegin = '1960-01-01T00:00:00.000Z'
timeEnd = '1960-01-21T00:00:00.000Z'
outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, dataType, timeBegin, timeEnd, usr_attribute, usr_value, gmlIDs=None, verbose=True)

print

# Grab the values of the 'STATE' attribute
usr_attribute = 'STATE'
values = pyGDP.getValues(shapefile, usr_attribute)
for v in values:
    print v
print

# List the available dataset URIs and pick the one we are interested in
dataSetURIs = pyGDP.getDataSetURI()
for d in dataSetURIs:
    print d

# Set our datasetURI
dataSetURI = 'dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/gmo/GMO_w_meta.ncml'
# Get the available data types associated with the dataset
dataType = 'Prcp'
# Get available time range on the dataset
timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
for t in timeRange:
    print t

"""
Instead of submitting in a value, we submit a list of gmlIDs associated
with either a small portion of that value, or multiple values.
"""
values = ['Wisconsin', 'Michigan', 'Minnesota']
path = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSetURI, dataType, timeRange[0], timeRange[0], usr_attribute, values)
print path
Example #22
fileList = []

for yr in years:

    datasetURI = '%s%s%s' % (baseURI, yr, '.nc')

    timeRange = pyGDP.getTimeRange(datasetURI, varID)

    timeStart = timeRange[0]

    timeEnd = timeRange[1]

    print('beginning process request for %s on year %s' % (varID, yr))

    outputFile_handle = pyGDP.submitFeatureWeightedGridStatistics(
        userPoly, datasetURI, varID, timeStart, timeEnd)

    fileList.append(outputFile_handle)
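
# Hedged gap-filler (the original snippet plots dates/vals below without ever
# building them): one plausible reconstruction, reusing the numpy.loadtxt
# pattern from the other examples; the two-column CSV layout and the mdates
# import are assumptions.
import numpy
dates, vals = [], []
for f in fileList:
    d, v = numpy.loadtxt(f, unpack=True, skiprows=3, delimiter=',',
                         converters={0: mdates.strpdate2num('%Y-%m-%dT%H:%M:%SZ')})
    dates.extend(d)
    vals.extend(v)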

fig = pylab.figure(figsize=(12, 6), facecolor='w')

ax = fig.gca()

ax.set_ylabel('Air Temperature (C)')

ax.plot_date(dates, vals, 'b-')

ax.xaxis.set_major_locator(mdates.YearLocator(5, month=1, day=1))

ax.xaxis.set_major_formatter(mdates.DateFormatter(' %Y'))
def main_func(region, currdir, timestart, timeend):#, scenarios):
    
    def Region_lookup(region):
        # (hru count, ScienceBase item id, coverage flag) per NHDplus region
        sb_base = 'https://www.sciencebase.gov/catalogMaps/mapping/ows/'
        regions = {
            'nhru': (371, '51d35da1e4b0ca184833940c', 'false'),
            'R01': (2462, '5244735ae4b05b217bada04e', 'false'),
            'R02': (4827, '52696784e4b0584cbe9168ee', 'false'),
            'R03': (9899, '5283bd23e4b047efbbb57922', 'false'),
            'R04': (5936, '5284ff57e4b063f258e61b9d', 'false'),
            'R05': (7182, '528516bbe4b063f258e62161', 'false'),
            'R06': (2303, '51d75296e4b055e0afd5be2c', 'true'),
            'R07': (8205, '52851cd5e4b063f258e643dd', 'true'),
            'R08': (4449, '52854695e4b063f258e6513c', 'true'),
            'R10L': (8603, '520031dee4b0ad2d97189db2', 'true'),
            'R10U': (10299, '5216849ee4b0b45d6ba61e2e', 'false'),
            'R11': (7373, '51d1f9ebe4b08b18a62d586b', 'true'),
            'R12': (7815, '5201328ae4b009d47a4c247a', 'false'),
            'R13': (1958, '51d752b9e4b055e0afd5be36', 'false'),
            'R14': (3879, '52029c68e4b0e21cafa4b40c', 'false'),
            'R15': (3441, '5285389ae4b063f258e64863', 'false'),
            'R16': (2664, '52853f97e4b063f258e64875', 'false'),
        }
        if region not in regions:
            return []
        hrus, sb_id, coverage = regions[region]
        return [hrus, sb_base + sb_id, coverage]
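    # e.g. Region_lookup('R06') ->
    #   [2303, 'https://www.sciencebase.gov/catalogMaps/mapping/ows/51d75296e4b055e0afd5be2c', 'true']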
            
    
    global parsedFiles, csvfile, vistrails_data_set, nhru, url
    import pyGDP
    #region = 'R13'
    parsedFiles = []
    vistrails_data_set = []
    
    Region_return=Region_lookup(region)
    hrus = Region_return[0]
    nhru = hrus
    ScienceBase_URL= Region_return[1]
    
    pyGDP.WFS_URL = ScienceBase_URL
    
    # call web processing module
    
    pyGDP = pyGDP.pyGDPwebProcessing()
    
    # change working directory so the GDP output will be written there
    
    # Datasets and their properties
    # run 1, 2 , and 3
    
    #**********************************
    # run 1 only
    #scenario = 'a2'
    # other scenarios are 'a1b' and 'b1'
    scenarios = ['a2','a1b'] # not running b1 or a1fi
    
    # hard-coded run window (overrides the timestart/timeend arguments)
    timestart = '1950-01-15T00:00:00.000Z'
    timeend = '1960-12-15T00:00:00.000Z'
    
    shapefiles = pyGDP.getShapefiles()
    for shp in shapefiles:
        print shp
    
    shapefile = 'sb:nhru'
    user_attribute = 'hru_id_loc'
    
    user_value = None
    
    #os.chdir('C:\\Users\\reimandy\\workspace\\userpackages\\WaterBalanceModel\\Step1_CLIMATE_DATA\\'+region+'\\MAURERBREKE')
    dir = currdir
    
    gcmRun = '1'
    for scenario in scenarios:
        cccma_cgcm3_1_1 = ['sres'+scenario+'_cccma-cgcm3-1_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                           'sres'+scenario+'_cccma-cgcm3-1_1_Prcp',
                           'sres'+scenario+'_cccma-cgcm3-1_1_Tavg']
        gfdl_cm2_1_1_1 = ['sres'+scenario+'_gfdl-cm2-1_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                          'sres'+scenario+'_gfdl-cm2-1_1_Prcp',
                          'sres'+scenario+'_gfdl-cm2-1_1_Tavg']
        miroc3_2_medres_1 = ['sres'+scenario+'_miroc3-2-medres_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                             'sres'+scenario+'_miroc3-2-medres_1_Prcp',
                             'sres'+scenario+'_miroc3-2-medres_1_Tavg']
        miub_echo_g_1_1 = ['sres'+scenario+'_miub-echo-g_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                           'sres'+scenario+'_miub-echo-g_1_Prcp',
                           'sres'+scenario+'_miub-echo-g_1_Tavg']
        mpi_echam5_1 = ['sres'+scenario+'_mpi-echam5_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                        'sres'+scenario+'_mpi-echam5_1_Prcp',
                        'sres'+scenario+'_mpi-echam5_1_Tavg']
        mri_cgcm2_3_2a_1 = ['sres'+scenario+'_mri-cgcm2-3-2a_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                            'sres'+scenario+'_mri-cgcm2-3-2a_1_Prcp',
                            'sres'+scenario+'_mri-cgcm2-3-2a_1_Tavg']
        # New MaurerBreke Statistically downscaled datasets (put with other MB datasets)
        bccr_bcm2_0 = ['sres'+scenario+'_bccr-bcm2-0_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                           'sres'+scenario+'_bccr-bcm2-0_'+gcmRun+'_Prcp',
                           'sres'+scenario+'_bccr-bcm2-0_'+gcmRun+'_Tavg']
        cnrm_cm3 = ['sres'+scenario+'_cnrm-cm3_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                            'sres'+scenario+'_cnrm-cm3_'+gcmRun+'_Prcp',
                            'sres'+scenario+'_cnrm-cm3_'+gcmRun+'_Tavg']
        csiro_mk3_0 = ['sres'+scenario+'_csiro-mk3-0_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                            'sres'+scenario+'_csiro-mk3-0_'+gcmRun+'_Prcp',
                            'sres'+scenario+'_csiro-mk3-0_'+gcmRun+'_Tavg']
        giss_model_e_r = ['sres'+scenario+'_giss-model-e-r_2','dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                           'sres'+scenario+'_giss-model-e-r_2_Prcp',
                           'sres'+scenario+'_giss-model-e-r_2_Tavg']
        inmcm3_0 = ['sres'+scenario+'_inmcm3-0_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                          'sres'+scenario+'_inmcm3-0_'+gcmRun+'_Prcp',
                          'sres'+scenario+'_inmcm3-0_'+gcmRun+'_Tavg']
        ipsl_cm4 = ['sres'+scenario+'_ipsl-cm4_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                          'sres'+scenario+'_ipsl-cm4_'+gcmRun+'_Prcp',
                          'sres'+scenario+'_ipsl-cm4_'+gcmRun+'_Tavg']
        ncar_ccsm3_0 = ['sres'+scenario+'_ncar-ccsm3-0_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                            'sres'+scenario+'_ncar-ccsm3-0_'+gcmRun+'_Prcp',
                            'sres'+scenario+'_ncar-ccsm3-0_'+gcmRun+'_Tavg']
        ncar_pcm1 = ['sres'+scenario+'_ncar-pcm1_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                            'sres'+scenario+'_ncar-pcm1_'+gcmRun+'_Prcp',
                            'sres'+scenario+'_ncar-pcm1_'+gcmRun+'_Tavg']
        ukmo_hadcm3 = ['sres'+scenario+'_ukmo-hadcm3_'+gcmRun,'dods://cida.usgs.gov/thredds/dodsC/maurer/maurer_brekke_w_meta.ncml','Monthly',timestart,timeend,\
                            'sres'+scenario+'_ukmo-hadcm3_'+gcmRun+'_Prcp',
                            'sres'+scenario+'_ukmo-hadcm3_'+gcmRun+'_Tavg']
    
        data = [cccma_cgcm3_1_1,gfdl_cm2_1_1_1,miroc3_2_medres_1,miub_echo_g_1_1,mpi_echam5_1,mri_cgcm2_3_2a_1]
    
        for dataset in data:
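            # strip the 'sres<scenario>_' prefix from the dataset name:
            # 7 characters for a two-letter scenario ('a2'), 8 for 'a1b'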
            if len(scenario) == 2:
                name = dataset[0]
                name = name[7:]
            else:
                name = dataset[0]
                name = name[8:]
            file_loc = str(dir.name)+'\\Step1_CLIMATE_DATA\\'+region+'\\'+scenario+'\\'+name
            if not os.path.exists(file_loc):
                os.mkdir(file_loc)
            os.chdir(file_loc)
            print "The current dataset being worked on is: " + name
    
            dataSet = dataset[1]
    
            dataType = dataset[5:]
    
            timestep = dataset[2]
    
            timeBegin = dataset[3]
            timeEnd = dataset[4]
    
            gmlIDs = None
            verbose = True
    
            coverage = Region_return[2]
            delim = 'COMMA'
            stats = 'MEAN'
    
            start = time.time()
            outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSet, dataType, timeBegin, timeEnd, user_attribute, user_value, gmlIDs, verbose, coverage, delim, stats)
            end = time.time()
            
            print "Start time is: " + str(start)
            print 'End time is: ' + str(end)
            print 'Total time was: ' + str(end-start)
            print outputPath
    
            shutil.copy2(outputPath, region+'_'+name+'.csv')
            
            csvfile = os.path.join(os.getcwd(), region+'_'+name+'.csv')

#             dataType = ['Prcp', 'Tavg']
#             vistrails_data_set = ['Prcp', 'Tavg']
            
            #csvread = csv.reader(open(region+'_'+name+'.csv', 'rb'))
            
            #csvwrite = csv.writer(open(dataType[0]+'.csv', 'wb'))
            #parsedFiles.append(dataType[0]+'.csv')
            #index = 0
    
            #temp = csvread
            #var = temp.next()
            #var[0] = '#'+dataType[0]
    
            #gage = temp.next()
    
            #csvwrite.writerow(var)
            #csvwrite.writerow(gage)
    
#             for variable in dataType:
#     
#                 for row in csvread:
#                     if variable == dataType[len(dataType) - 1]:
#                         csvwrite.writerow(row)
#                     else:
#                         if (row[0] in '#'+dataType[index+1]) or (row[0] in '# ' + dataType[index+1]):
#                             var = '#'+dataType[index+1]
#                             csvwrite = csv.writer(open(dataType[index+1] + '.csv', 'wb'))
#                             parsedFiles.append(dataType[index+1]+'.csv')
#                             row[1:] = ''
#                             row[0] = var
#                             csvwrite.writerow(row)
#                             csvwrite.writerow(gage)
#     
#                             if len(dataType) == 2:
#                                 csvwrite.writerow(csvread.next())
#                             else:
#                                 csvread.next()
#                                 csvwrite.writerow(csvread.next())
#                                 csvwrite.writerow(csvread.next())
#                             break
#                         else:
#                             if dataType[index+1] not in row[0] and row[0] not in dataType[index+1]:
#                                 csvwrite.writerow(row)
#                 print 'Finished parsing ' + variable + '.csv'
#     
#                 if (index+1)<len(dataType):
#                     index += 1
#     
#                 os.chdir(dir)
    
    
#main_func('nhru', os.getcwd(), '', '')
baseURI = 'http://cida.usgs.gov/thredds/dodsC/loca_future'
startTime = 1950
endTime = 2100
varID = 'pr_CCSM4_r6i1p1_rcp45'

#['tasmax_CCSM4_r6i1p1_rcp45', 'tasmin_CCSM4_r6i1p1_rcp45']

years = range(startTime, endTime + 1)
fileList = []

for yr in years:
    datasetURI = '%s%s%s' % (baseURI, yr, '.nc')
    timeRange = pyGDP.getTimeRange(datasetURI, varID)
    timeStart = timeRange[0]
    timeEnd = timeRange[1]
    print "process beginning for %s on year %s'" % (varID, yr)
    # geoType (the uploaded feature collection) is assumed to be defined
    # earlier in the full script
    outputFile_handle = pyGDP.submitFeatureWeightedGridStatistics(
        geoType,
        datasetURI,
        varID,
        timeStart,
        timeEnd,
        '',
        '',
        '',
        '',
        coverage='true',
        delim='COMMA')
    fileList.append(outputFile_handle)
# Note that removing "value" in the main request below will run all values in the shapefile.
value = ['Delaware']

# Search for datasets
dataSetURIs = pyGDP.getDataSetURI(anyText='wicci')
for dataset in dataSetURIs:
    print(dataset)

# Loop through datasets of interest, in this case the first three OPeNDAP urls.
for dataSetURI in dataSetURIs[1][2][0:3]:
    # Get the available data types associated with the dataSetURI
    dataTypes = pyGDP.getDataType(dataSetURI)
    print(dataTypes)

    # For this example just run the first dataType. This dataType list should be modified if multiple
    # datatypes are required.
    dataType = dataTypes[0]
    print(dataType)

    # Get available time range for the dataset.
    timeRange = pyGDP.getTimeRange(dataSetURI, dataType)
    print(timeRange)

    # Execute a FeatureWeightedGridStatistics request and return the path to the output file.
    # Note that this is for one time step but could be for multiple. Please test on very short
    # time periods to minimize impacts on system resources.
    outputfile = pyGDP.submitFeatureWeightedGridStatistics(
        shapefile, dataSetURI, dataType, timeRange[0], timeRange[0],
        usr_attribute, value)
    print(outputfile)
import pyGDP

pyGDP = pyGDP.pyGDPwebProcessing()
"""
This example shows how to use multiple dataTypes and Statistics.

"""

shapefile = 'sample:simplified_huc8'
user_attribute = 'SUBBASIN'
user_value = 'Baraboo'
dataSet = 'dods://cida.usgs.gov/thredds/dodsC/gmo/GMO_w_meta.ncml'
dataType = ['Prcp', 'Tavg', 'Tmax', 'Tmin']
timeBegin = '1970-01-24T00:00:00.000Z'
timeEnd = '1970-01-25T00:00:00.000Z'
gmlIDs = None
verbose = True
coverage = 'true'
delim = 'COMMA'
stats = ['MEAN', 'STD_DEV']

print 'Processing request.'
outputPath = pyGDP.submitFeatureWeightedGridStatistics(
    shapefile, dataSet, dataType, timeBegin, timeEnd, user_attribute,
    user_value, gmlIDs, verbose, coverage, delim, stats)
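
# A quick hedged check (not part of the original example): with four data
# types and two statistics requested, the returned CSV header should contain
# a MEAN and a STD_DEV column per feature for each variable; peek at the
# first few lines (file layout assumed).
with open(outputPath) as f:
    for _ in range(3):
        print f.readline().strip()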
from __future__ import print_function
import pyGDP

pyGDP = pyGDP.pyGDPwebProcessing()

"""
This example shows how to use multiple dataTypes and Statistics.

"""

shapefile = 'sample:simplified_huc8'
user_attribute = 'SUBBASIN'
user_value = 'Baraboo'
dataSet = 'dods://cida.usgs.gov/thredds/dodsC/gmo/GMO_w_meta.ncml'
dataType = ['Prcp', 'Tavg', 'Tmax', 'Tmin']
timeBegin = '1970-01-24T00:00:00.000Z'
timeEnd = '1970-01-25T00:00:00.000Z'
gmlIDs = None
verbose = True
coverage = 'true'
delim = 'COMMA'
stats = ['MEAN', 'STD_DEV']

print('Processing request.')
outputPath = pyGDP.submitFeatureWeightedGridStatistics(shapefile, dataSet, dataType, timeBegin, timeEnd,
                                                       user_attribute, user_value, gmlIDs, verbose,
                                                       coverage, delim, stats)
Example #28
 ) + '_' + dataURI.replace(
     'http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/ncml/',
     '')
 outputfilename = outputfilename.replace(
     'http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/averages_rcp/ncml/',
     '')
 outputfilename = outputfilename.replace(
     'http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/averages_hist/ncml/',
     '')
 outputfilename = outputfilename.replace(
     'http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/averages_gmo/ncml/',
     '')
 outputfilename = outputfilename.replace(
     'http://localhost:8080/thredds/dodsC/Scratch/thredds/bcca/bcca/cmip5/derivatives/cmip5_obs_der/',
     '')
 if not os.path.isfile(outputfilename):
     open(outputfilename, 'a').close()
     print shapefile
     print dataURI
     print dataTypes
     print timeRange
     print shapefiles[shapefile]
     outFile = pyGDP.submitFeatureWeightedGridStatistics(
         geoType=shapefile,
         dataSetURI=dataURI,
         varID=dataTypes,
         startTime=timeRange[0],
         endTime=timeRange[1],
         attribute=shapefiles[shapefile],
         coverage=False,
         outputfname=outputfilename)