def makePixelAreaArray(inputHDF5template_path,outputASCII_path):

    # import header information from a template hdf5 file
    hdrDict=getAsciiheaderFromTemplateHDF5(inputHDF5template_path)

    # for first column in grid:

    ## define vector of lower edge latitudes for each pixel in a column from the grid
    llclat = hdrDict['yllcorner'] + (np.arange(hdrDict['nrows'])*hdrDict['cellsize'])

    ## define vector of upper edge latitudes
    urclat = llclat+hdrDict['cellsize']

    ## define left-hand edge longitude
    llclon = hdrDict['xllcorner']

    ## define right-hand edge longitude
    urclon = llclon+hdrDict['cellsize']

    # call function to return a vector of areas for the pixels in this column
    pixel_areas_firstcolumn = cylindrical_pixel_area(llclon, llclat, urclon, urclat)

    # now duplicate this column of pixel areas across the full width of the array
    # (reversed first, because ascii grids run north-to-south while the latitude
    # vector was built south-to-north from yllcorner)
    pixel_areas_firstcolumn = pixel_areas_firstcolumn[::-1]
    pixel_areas_array = np.vstack((pixel_areas_firstcolumn,)*hdrDict['ncols']).T

    # export array as ascii
    exportAscii(pixel_areas_array,outputASCII_path,hdrDict)
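
The helper cylindrical_pixel_area is imported from elsewhere and not shown on this page. A minimal sketch of the standard spherical calculation it plausibly performs (the name, signature, and body below are inferred from the call above, not taken from map_utils): on a sphere of radius R, the area of a lat/long cell is R^2 * dlon * (sin(lat_top) - sin(lat_bottom)), which depends only on latitude - which is why a single column of areas can be tiled across the whole grid.

import numpy as np

def cylindrical_pixel_area_sketch(llclon, llclat, urclon, urclat, R=6371.0):
    # area (km^2) of lat/long cells on a sphere of radius R (km);
    # llclat/urclat may be vectors, giving one area per grid row
    dlon = np.radians(urclon - llclon)
    return (R ** 2) * dlon * (np.sin(np.radians(urclat)) - np.sin(np.radians(llclat)))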
Example #2
def exportHDF5asASCII(hdfFilePath,outputpath):

    from map_utils import getAsciiheaderFromTemplateHDF5
    from map_utils import exportAscii
    import tables as tb
    
    # open link to hdf5 file
    hf = tb.openFile(hdfFilePath, mode = "r")

    # get main array, then release the file handle
    inputarray = hf.root.data[:]
    hf.close()

    # get header info as dictionary
    hdrDict = getAsciiheaderFromTemplateHDF5(hdfFilePath)

    # export as ascii
    exportAscii(inputarray,outputpath,hdrDict)
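
exportAscii itself is also imported from map_utils; its behaviour can be inferred from how it is called throughout these examples. A minimal sketch, assuming the usual ESRI ascii grid layout (this is an illustration of the expected behaviour, not the real map_utils implementation):

import numpy as np

def exportAscii_sketch(array, outpath, hdrDict, mask=None):
    # write a six-line ESRI ascii header, then the data rows;
    # cells falling outside an optional 0/1 mask become NODATA_value
    out = np.asarray(array, dtype=float).copy()
    if mask is not None:
        out[mask == 0] = hdrDict['NODATA_value']
    f = open(outpath, 'w')
    for key in ('ncols', 'nrows', 'xllcorner', 'yllcorner',
                'cellsize', 'NODATA_value'):
        f.write('%-14s%s\n' % (key, hdrDict[key]))
    for row in out:
        f.write(' '.join(str(v) for v in row) + '\n')
    f.close()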
Example #3
def vec_to_asc(vec, fname, out_fname, unmasked, path=''):
    """
    Converts a vector of outputs on a thin, unmasked, ravelled subset of an
    ascii grid to an ascii file matching the original grid.
    """
    
    if np.any(np.isnan(vec)):
        raise ValueError('NaN in vec')
    
    header, headlines = get_header(fname,path)
    lon,lat,data = asc_to_ndarray(fname,path)
    data = grid_convert(data,'y-x+','x+y+')
    data_thin = np.zeros(unmasked.shape)
    data_thin[unmasked] = vec
    
    mapgrid = np.array(np.mgrid[0:data.shape[0],0:data.shape[1]], dtype=float)
    normalize_for_mapcoords(mapgrid[0], data_thin.shape[0]-1)
    normalize_for_mapcoords(mapgrid[1], data_thin.shape[1]-1)
    
    if data_thin.shape == data.shape:
        out = np.ma.masked_array(data_thin, mask=data.mask)
    else:
        out = np.ma.masked_array(ndimage.map_coordinates(data_thin, mapgrid), mask=data.mask)
        
    if np.any(np.isnan(out)):
        raise ValueError('NaN in output')
    
    # import pylab as pl
    # pl.close('all')
    # pl.figure()
    # pl.imshow(out, interpolation='nearest', vmin=0.)
    # pl.colorbar()
    # pl.title('Resampled')
    # 
    # pl.figure()
    # pl.imshow(np.ma.masked_array(data_thin, mask=True-unmasked), interpolation='nearest', vmin=0)
    # pl.colorbar()
    # pl.title('Original')
    
    out_conv = grid_convert(out,'x+y+','y-x+')
    
    header['NODATA_value'] = -9999
    exportAscii(out_conv.data,out_fname,header,~out_conv.mask)
    
    return out
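
vec_to_asc also leans on normalize_for_mapcoords before handing mapgrid to ndimage.map_coordinates. A minimal sketch consistent with the call sites above - an in-place rescale of one coordinate axis onto the range 0..max_index, so that full-grid indices map onto fractional indices into data_thin - though the real map_utils version may differ:

def normalize_for_mapcoords_sketch(coords, max_index):
    # rescale coords in place so it spans exactly 0 .. max_index
    coords -= coords.min()
    if coords.max() > 0:
        coords *= max_index / coords.max()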
    
def combineDistribExtractions_perpixel():

    # import the limits mask to tidy the final outputs and to provide an ascii template for the output arrays
    mask = tb.openFile(lim5kmbnry_path)
    hdrDict = getAsciiheaderFromTemplateHDF5(lim5kmbnry_path)

    # import first file to establish array dimensions (should all be the same in this directory, and all subsequent imports will be checked for consistency)
    dirList = os.listdir(exportPathDistributed_perpixel)
    for i in xrange(len(dirList)):
        fname = dirList[i]
        if (fname.find('_r') == -1) or (fname.find('.gz') == -1):
            print 'WARNING!! file '+str(fname)+' in '+str(exportPathDistributed_perpixel)+' is not of correct type, trying next file!!!'
            if i == (len(dirList)-1):
                print 'ERROR!!! no suitable files found in '+str(exportPathDistributed_perpixel)+' : EXITING!!!'
                return -9999
            continue
        else:
            # import the first suitable file, compare its shape to the mask grid, and if OK use it as the reference shape
            referenceshape = np.loadtxt(exportPathDistributed_perpixel+fname).shape
            if referenceshape != mask.root.data[:,:].shape:
                print 'ERROR!!! mask shape is '+str(mask.root.data[:,:].shape)+' but file '+str(fname)+' has shape '+str(referenceshape)+' : EXITING!!!!'
                return -9999
            break

    # establish a zero array of this shape that will be duplicated
    zeroMap = np.zeros(referenceshape)
     
    # initialise zero arrays to sum over for means, and counters for checking 
    if do_PRMap:
        meanPR = cp.deepcopy(zeroMap)
        meanPR2 = cp.deepcopy(zeroMap) 
        meanPRtally = meanPR2tally = 0
    if do_RoMap:
        meanRo = cp.deepcopy(zeroMap)
        meanRo2 = cp.deepcopy(zeroMap) 
        meanRotally = meanRo2tally = 0
    if do_BurdenMap:
        meanBUR = cp.deepcopy(zeroMap) 
        meanBUR2 = cp.deepcopy(zeroMap)
        meanBURtally = meanBUR2tally = 0 
        
    # initialise dictionary of PCM arrays to sum over for each scheme/class
    Nschemes=len(breaksDict)    
    schemeNames=breaksDict.keys()    
    PCMdict=cp.deepcopy(breaksDict)
    
    ## ..loop through each classification scheme 
    for ss in xrange(0,Nschemes): 
        scheme=schemeNames[ss]   
        breaknames = PCMdict[scheme]['BREAKNAMES']
        Nclasses=len(breaknames) 

        # define additional sub-dictionary to add to PCMdict to house arrays for PCM per class per scheme..
        PCM = {}
        
        # and another to house the checking tallies
        PCMtally = {}

        # .. for each class within each scheme..
        for cc in xrange(0,Nclasses):
            thisbreakname = breaknames[cc]
            
            # define an empty array for this scheme-class to house PCM
            blankarray = {thisbreakname: cp.deepcopy(zeroMap) }

            # define a zero integer for this scheme-class to house PCMtally
            zerotally = {thisbreakname: 0 }

            # add this blank array to the interim PCM dictionary, and initialise the tally counter
            PCM.update(blankarray)
            PCMtally.update(zerotally)

        # add this sub-dictionary to PCMdict for this scheme
        PCM = {'PCM':PCM}
        PCMtally = {'PCMtally':PCMtally}
        PCMdict[scheme].update(PCM)
        PCMdict[scheme].update(PCMtally)

    # loop through all files in 'exportPathDistributed_perpixel' directory....
    dirList = os.listdir(exportPathDistributed_perpixel)
    for i in xrange(len(dirList)):

        # does this file contain the '_r' string and is it a gz file - check that this is extraction output (there should not be anything else in the directory)
        fname = dirList[i]
        if (fname.find('_r') == -1) or (fname.find('.gz') == -1):
            print 'WARNING!! file '+str(fname)+' in '+str(exportPathDistributed_perpixel)+' is not of correct type, trying next file!!!'
            continue

        # if the string looks OK, then import the file
        importarray = np.loadtxt(exportPathDistributed_perpixel+fname)
            
        # check shape of array 
        if importarray.shape != referenceshape:
            print 'WARNING!! file '+str(fname)+' has shape '+str(importarray.shape)+' but reference shape is '+str(referenceshape)+', trying next file!!!'
            continue

        # deconstruct filename
        name_parts = deconstructFilename(fname)
        variable = name_parts['variable']

        # if it's a meanPR file, add its values to the running global meanPR array
        if variable == 'meanPR':

            print '\nimported file : '+str(fname)
            print 'mean is : '+str(np.mean(importarray))

            meanPR = meanPR + importarray
            meanPRtally = meanPRtally+1

        # if it's a meanPR2 file, add its values to the running global meanPR2 array
        if variable == 'meanPR2':
            meanPR2 = meanPR2 + importarray
            meanPR2tally = meanPR2tally+1

        # if it's a meanRo file, add its values to the running global meanRo array
        if variable == 'meanRo':

            print '\nimported file : '+str(fname)
            print 'mean is : '+str(np.mean(importarray))

            meanRo = meanRo + importarray
            meanRotally = meanRotally+1

        # if it's a meanRo2 file, add its values to the running global meanRo2 array
        if variable == 'meanRo2':
            meanRo2 = meanRo2 + importarray
            meanRo2tally = meanRo2tally+1
            
        # if it's a meanBUR file, add its values to the running global meanBUR array
        if variable == 'meanBUR':
            meanBUR = meanBUR + importarray
            meanBURtally = meanBURtally+1

        # if it's a meanBUR2 file, add its values to the running global meanBUR2 array
        if variable == 'meanBUR2':
            meanBUR2 = meanBUR2 + importarray
            meanBUR2tally = meanBUR2tally+1

        # if it's a PCM file
        if variable == 'PCM':
        
            # get scheme and class ID
            scheme = name_parts['scheme']        
            thisbreakname = name_parts['breakname']

            # add running PCM values to the correct global PCM array for this scheme and class, and update corresponding tally
            PCMdict[scheme]['PCM'][thisbreakname] = PCMdict[scheme]['PCM'][thisbreakname] + importarray
            PCMdict[scheme]['PCMtally'][thisbreakname] = PCMdict[scheme]['PCMtally'][thisbreakname] +1 
            
#    # run checks on tallys - they should all be the same for each variable
#    if ((meanPRtally == meanPR2tally == meanBURtally == meanBUR2tally)==False):
#        print 'WARNING!!! tallys do not match: meanPRtally='+str(meanPRtally)+' meanPR2tally='+str(meanPR2tally)+' meanBURtally='+str(meanBURtally)+' meanBUR2tally='+str(meanBUR2tally)
        
    ## ..loop through each classification scheme 
    for ss in xrange(0,Nschemes): 
        scheme=schemeNames[ss]   
        breaknames = PCMdict[scheme]['BREAKNAMES']
        Nclasses=len(breaknames) 

        # .. for each class within each scheme..
        for cc in xrange(0,Nclasses):
            thisbreakname = breaknames[cc]

            thistally = PCMdict[scheme]['PCMtally'][thisbreakname]
            if thistally != meanPRtally:
                print 'WARNING!!! tallies do not match: PCM tally for scheme '+str(scheme)+' class '+str(thisbreakname)+' is '+str(thistally)+' but meanPRtally is '+str(meanPRtally)
    
    # calculate SD for each variable
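    # (this relies on the identity Var(X) = E[X^2] - (E[X])^2, assuming the
    # accumulated meanX and meanX2 grids hold per-pixel means of X and X^2)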
    if do_PRMap:
        varPR = meanPR2 - np.square(meanPR)
        stdevPR = np.sqrt(varPR)

    if do_RoMap:
        varRo = meanRo2 - np.square(meanRo)
        stdevRo = np.sqrt(varRo)

    if do_BurdenMap:
        varBUR = meanBUR2 - np.square(meanBUR)
        stdevBUR = np.sqrt(varBUR)

    # export mean and SD arrays as asciis
    if do_PRMap:
        print '\nmeanPRtally is: '+str(meanPRtally)
        print 'mean of meanPR before export to ascii is: '+str(np.mean(meanPR))
        exportAscii(meanPR,exportPathCombined_perpixel+"meanPR.asc",hdrDict,mask = mask.root.data[:,:])
        exportAscii(stdevPR,exportPathCombined_perpixel+"stdevPR.asc",hdrDict,mask = mask.root.data[:,:])

    if do_RoMap:
        print '\nmeanRotally is: '+str(meanRotally)
        print 'mean of meanRo before export to ascii is: '+str(np.mean(meanRo))
        exportAscii(meanRo,exportPathCombined_perpixel+"meanRo.asc",hdrDict,mask = mask.root.data[:,:])
        exportAscii(stdevRo,exportPathCombined_perpixel+"stdevRo.asc",hdrDict,mask = mask.root.data[:,:])

    if do_BurdenMap:
        print '\nmeanBURtally is: '+str(meanBURtally)
        print 'mean of meanBUR before export to ascii is: '+str(np.mean(meanBUR))
        exportAscii(meanBUR,exportPathCombined_perpixel+"meanBUR.asc",hdrDict,mask = mask.root.data[:,:])
        exportAscii(stdevBUR,exportPathCombined_perpixel+"stdevBUR.asc",hdrDict,mask = mask.root.data[:,:])

    # for each classification scheme, define an array showing PCM to most likely class (PCMMLC) and what that most likely class is (MLC)
    for ss in xrange(0,Nschemes):             
        scheme=schemeNames[ss]                
        breaknames = PCMdict[scheme]['BREAKNAMES']             
        Nclasses=len(breaknames)

        # initialise arrays of PCMMLC and MLC            
        PCMMLC = cp.deepcopy(zeroMap)
        MLC = cp.deepcopy(zeroMap)

        # .. for each class within each scheme..            
        for cc in xrange(0,Nclasses):            
            thisbreakname = breaknames[cc]

            # update MLC if this class has higher PCM than previous highest
            MLCid = PCMdict[scheme]['PCM'][thisbreakname]>PCMMLC
            MLC[MLCid] = cc+1

            # keep running maximum PCM through the classes
            PCMMLC = np.maximum(PCMMLC,PCMdict[scheme]['PCM'][thisbreakname])

            # whilst at this loop location, export PCM for this scheme/class as ascii
            exportAscii(PCMdict[scheme]['PCM'][thisbreakname],exportPathCombined_perpixel+'PCM_'+scheme+'_'+thisbreakname+'.asc',hdrDict,mask = mask.root.data[:,:])

        # export MLC and PCMMLC for this scheme as asciis
        exportAscii(PCMMLC,exportPathCombined_perpixel+'PCMMLC_'+scheme+'.asc',hdrDict,mask = mask.root.data[:,:])
        exportAscii(MLC,exportPathCombined_perpixel+'MLC_'+scheme+'.asc',hdrDict,mask = mask.root.data[:,:])

    return
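
deconstructFilename is another map_utils helper not shown on this page. A minimal sketch of the parsing it plausibly performs, assuming extraction filenames like 'meanPR_r0to999.gz' and 'PCM_scheme_class_r0to999.gz' (the naming convention is inferred from the call sites, not confirmed):

def deconstructFilename_sketch(fname):
    # strip the .gz suffix, then split the stem on underscores
    stem = fname[:-3] if fname.endswith('.gz') else fname
    parts = stem.split('_')
    name_parts = {'variable': parts[0]}
    if parts[0] == 'PCM':
        # PCM files additionally carry the scheme and class names
        name_parts['scheme'] = parts[1]
        name_parts['breakname'] = parts[2]
    return name_parts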
Example #6
def combineDistribExtractions_perpixel():

    # import the limits mask to tidy the final outputs and to provide an ascii template for the output arrays
    mask = tb.openFile(lim5kmbnry_path)
    hdrDict = getAsciiheaderFromTemplateHDF5(lim5kmbnry_path)

    # import first file to establish array dimensions (should all be the same in this directory, and all subsequent imports will be checked for consistency)
    dirList = os.listdir(exportPathDistributed_perpixel)
    for i in xrange(len(dirList)):
        fname = dirList[i]
        if (fname.find('_r') == -1) or (fname.find('.gz') == -1):
            print 'WARNING!! file '+str(fname)+' in '+str(exportPathDistributed_perpixel)+' is not of correct type, trying next file!!!'
            if i == (len(dirList)-1):
                print 'ERROR!!! no suitable files found in '+str(exportPathDistributed_perpixel)+' : EXITING!!!'
                return -9999
            continue
        else:
            # import the first suitable file, compare its shape to the mask grid, and if OK use it as the reference shape
            referenceshape = np.loadtxt(exportPathDistributed_perpixel+fname).shape
            if referenceshape != mask.root.data[:,:].shape:
                print 'ERROR!!! mask shape is '+str(mask.root.data[:,:].shape)+' but file '+str(fname)+' has shape '+str(referenceshape)+' : EXITING!!!!'
                return -9999
            break

    # establish a zero array of this shape that will be duplicated
    zeroMap = np.zeros(referenceshape)
     
    # initialise zero arrays to sum over for means, and counters for checking 
    meanPR = cp.deepcopy(zeroMap)
    meanPR2 = cp.deepcopy(zeroMap) 
    meanBUR = cp.deepcopy(zeroMap) 
    meanBUR2 = cp.deepcopy(zeroMap)

    meanPRtally = meanPR2tally = meanBURtally = meanBUR2tally = 0 

    # initialise dictionary of PCM arrays to sum over for each scheme/class
    Nschemes=len(breaksDict)    
    schemeNames=breaksDict.keys()    
    PCMdict=cp.deepcopy(breaksDict)
    
    ## ..loop through each classification scheme 
    for ss in xrange(0,Nschemes): 
        scheme=schemeNames[ss]   
        breaknames = PCMdict[scheme]['BREAKNAMES']
        Nclasses=len(breaknames) 

        # define additional sub-dictionary to add to PCMdict to house arrays for PCM per class per scheme..
        PCM = {}
        
        # and another to house the checking tallies
        PCMtally = {}

        # .. for each class within each scheme..
        for cc in xrange(0,Nclasses):
            thisbreakname = breaknames[cc]
            
            # define an empty array for this scheme-class to house PCM
            blankarray = {thisbreakname: cp.deepcopy(zeroMap) }

            # define a zero integer for this scheme-class to house PCMtally
            zerotally = {thisbreakname: 0 }

            # add this blank array to the interim PCM dictionary, and initialise the tally counter
            PCM.update(blankarray)
            PCMtally.update(zerotally)

        # add this sub-dictionary to PCMdict for this scheme
        PCM = {'PCM':PCM}
        PCMtally = {'PCMtally':PCMtally}
        PCMdict[scheme].update(PCM)
        PCMdict[scheme].update(PCMtally)

    # loop through all files in 'exportPathDistributed_perpixel' directory....
    dirList = os.listdir(exportPathDistributed_perpixel)
    for i in xrange(len(dirList)):

        # does this file contain the '_r' string and is it a gz file - check that this is extraction output (there should not be anything else in the directory)
        fname = dirList[i]
        if (fname.find('_r') == -1) or (fname.find('.gz') == -1):
            print 'WARNING!! file '+str(fname)+' in '+str(exportPathDistributed_perpixel)+' is not of correct type, trying next file!!!'
            continue

        # if the string looks OK, then import the file
        importarray = np.loadtxt(exportPathDistributed_perpixel+fname)
            
        # check shape of array 
        if importarray.shape != referenceshape:
            print 'WARNING!! file '+str(fname)+' has shape '+str(importarray.shape)+' but reference shape is '+str(referenceshape)+', trying next file!!!'
            continue

        # deconstruct filename
        name_parts = deconstructFilename(fname)
        variable = name_parts['variable']

        # if it's a meanPR file, add its values to the running global meanPR array
        if variable == 'meanPR':
            meanPR = meanPR + importarray
            meanPRtally = meanPRtally+1

        # if it's a meanPR2 file, add its values to the running global meanPR2 array
        if variable == 'meanPR2':
            meanPR2 = meanPR2 + importarray
            meanPR2tally = meanPR2tally+1
            
        # if it's a meanBUR file, add its values to the running global meanBUR array
        if variable == 'meanBUR':
            meanBUR = meanBUR + importarray
            meanBURtally = meanBURtally+1

        # if it's a meanBUR2 file, add its values to the running global meanBUR2 array
        if variable == 'meanBUR2':
            meanBUR2 = meanBUR2 + importarray
            meanBUR2tally = meanBUR2tally+1

        # if it's a PCM file
        if variable == 'PCM':
        
            # get scheme and class ID
            scheme = name_parts['scheme']        
            thisbreakname = name_parts['breakname']

            # add running PCM values to the correct global PCM array for this scheme and class, and update corresponding tally
            PCMdict[scheme]['PCM'][thisbreakname] = PCMdict[scheme]['PCM'][thisbreakname] + importarray
            PCMdict[scheme]['PCMtally'][thisbreakname] = PCMdict[scheme]['PCMtally'][thisbreakname] +1 
            
    # run checks on tallies - they should all be the same for each variable
    if not (meanPRtally == meanPR2tally == meanBURtally == meanBUR2tally):
        print 'WARNING!!! tallies do not match: meanPRtally='+str(meanPRtally)+' meanPR2tally='+str(meanPR2tally)+' meanBURtally='+str(meanBURtally)+' meanBUR2tally='+str(meanBUR2tally)
        
    ## ..loop through each classification scheme 
    for ss in xrange(0,Nschemes): 
        scheme=schemeNames[ss]   
        breaknames = PCMdict[scheme]['BREAKNAMES']
        Nclasses=len(breaknames) 

        # .. for each class within each scheme..
        for cc in xrange(0,Nclasses):
            thisbreakname = breaknames[cc]

            thistally = PCMdict[scheme]['PCMtally'][thisbreakname]
            if thistally != meanPRtally:
                print 'WARNING!!! tallies do not match: PCM tally for scheme '+str(scheme)+' class '+str(thisbreakname)+' is '+str(thistally)+' but meanPRtally is '+str(meanPRtally)
    
    # calculate SD for PR and Burden
    varPR = meanPR2 - np.square(meanPR)
    stdevPR = np.sqrt(varPR)
    varBUR = meanBUR2 - np.square(meanBUR)
    stdevBUR = np.sqrt(varBUR)

    # export mean and SD arrays as asciis
    exportAscii(meanPR,exportPathCombined_perpixel+"meanPR.asc",hdrDict,mask = mask.root.data[:,:])
    exportAscii(stdevPR,exportPathCombined_perpixel+"stdevPR.asc",hdrDict,mask = mask.root.data[:,:])
    exportAscii(meanBUR,exportPathCombined_perpixel+"meanBUR.asc",hdrDict,mask = mask.root.data[:,:])
    exportAscii(stdevBUR,exportPathCombined_perpixel+"stdevBUR.asc",hdrDict,mask = mask.root.data[:,:])

    # for each classification scheme, define an array showing PCM to most likely class (PCMMLC) and what that most likely class is (MLC)
    for ss in xrange(0,Nschemes):             
        scheme=schemeNames[ss]                
        breaknames = PCMdict[scheme]['BREAKNAMES']             
        Nclasses=len(breaknames)

        # initialise arrays of PCMMLC and MLC            
        PCMMLC = cp.deepcopy(zeroMap)
        MLC = cp.deepcopy(zeroMap)

        # .. for each class within each scheme..            
        for cc in xrange(0,Nclasses):            
            thisbreakname = breaknames[cc]

            # update MLC if this class has higher PCM than previous highest
            MLCid = PCMdict[scheme]['PCM'][thisbreakname]>PCMMLC
            MLC[MLCid] = cc+1

            # keep running maximum PCM through the classes
            PCMMLC = np.maximum(PCMMLC,PCMdict[scheme]['PCM'][thisbreakname])

            # whilst at this loop location, export PCM for this scheme/class as ascii
            exportAscii(PCMdict[scheme]['PCM'][thisbreakname],exportPathCombined_perpixel+'PCM_'+scheme+'_'+thisbreakname+'.asc',hdrDict,mask = mask.root.data[:,:])

        # export MLC and PCMMLC for this scheme as asciis
        exportAscii(PCMMLC,exportPathCombined_perpixel+'PCMMLC_'+scheme+'.asc',hdrDict,mask = mask.root.data[:,:])
        exportAscii(MLC,exportPathCombined_perpixel+'MLC_'+scheme+'.asc',hdrDict,mask = mask.root.data[:,:])

    return
def coarsenAsciiGridRes(fpathin, fpathout, coarsenBy, aggregationType, overlapOption, NAoption):
    """
    creates a new ascii with resolution coarsenBy coarser than input

    coarsenBy (int): factor to coarsen by - e.g. 5 would convert a 1km grid to a 5km grid
    aggregationType (str); can be 'MEAN' or 'SUM'
    overlapOption (str): can be 'EXPAND' or 'SHRINK' - what to do if the new resolution grid dimensions do not divide an integer number of times into the old.. 
    NAoption (str): can be 'IGNORE' which allows target cells to be non NA if valid small cells are present, or 'PRESERVE' which does the oposit - any target containng an NA will be NA
    """

    # check input parameters are valid
    if (aggregationType != "MEAN") & (aggregationType != "SUM"):
        raise ValueError("invalid parameter value for aggregationType: " + str(aggregationType))

    if (overlapOption != "EXPAND") & (overlapOption != "SHRINK") & (overlapOption != "PROHIBITED"):
        raise ValueError("invalid parameter value for overlapOption: " + str(overlapOption))
    if (overlapOption == "EXPAND") | (overlapOption == "SHRINK"):
        raise ValueError(
            "SORRY!! have bnot yet implenmented option to expand or shrink overlap - rows and cols must divide perfectly"
        )
    if (NAoption != "IGNORE") & (NAoption != "PRESERVE"):
        raise ValueError("invalid parameter value for NAoption: " + str(NAoption))

    coarsenBy = float(coarsenBy)

    f = file(fpathin, "r")

    # Extract metadata from asc file.
    ncolsIN = int(f.readline()[14:])
    nrowsIN = int(f.readline()[14:])
    xllcorner = float(f.readline()[14:])
    yllcorner = float(f.readline()[14:])
    cellsizeIN = float(f.readline()[14:])
    NODATA_value = int(f.readline()[14:])
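    # (the fixed [14:] slice assumes each header value starts at column 15,
    # e.g. "ncols         8664" - matching the header layout used by the
    # exportAscii calls elsewhere in these examples)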

    print "cellsizeIN: " + str(cellsizeIN)

    # define output grid parameters (taking into account whether we are expanding or shrinking in the case of overlap)
    if overlapOption == "PROHIBITED":
        if (int(ncolsIN / coarsenBy) != (ncolsIN / coarsenBy)) or (int(nrowsIN / coarsenBy) != (nrowsIN / coarsenBy)):
            raise ValueError("rows or cols do not divide perfectly into new resolution, and overlapOption == PROHIBITED")
        ncolsOUT = int(ncolsIN / coarsenBy)
        nrowsOUT = int(nrowsIN / coarsenBy)
    if overlapOption == "EXPAND":
        ncolsOUT = int(np.ceil(ncolsIN / coarsenBy))
        nrowsOUT = int(np.ceil(nrowsIN / coarsenBy))
    if overlapOption == "SHRINK":
        ncolsOUT = int(np.floor(ncolsIN / coarsenBy))
        nrowsOUT = int(np.floor(nrowsIN / coarsenBy))
    cellsizeOUT = cellsizeIN * coarsenBy

    # define output header dictionary
    headerDict = {
        "ncols": ncolsOUT,
        "nrows": nrowsOUT,
        "xllcorner": xllcorner,
        "yllcorner": yllcorner,
        "cellsize": cellsizeOUT,
        "NODATA_value": NODATA_value,
    }

    # initialise interim grid shrunk in rows, and corresponding 1-d vector to record number of small rows aggregated in each big row (for later use in mean)
    interimGrid = np.zeros(nrowsOUT * ncolsIN).reshape(nrowsOUT, ncolsIN)
    rowDenominator = np.zeros(ncolsIN)

    # initialise final grid shrunk in both rows and columns
    outgrid = np.zeros(nrowsOUT * ncolsOUT).reshape(nrowsOUT, ncolsOUT)
    colDenominator = np.zeros(nrowsOUT)

    # loop through rows and aggregate as we go
    aggCounter = 0
    rowIndexOUT = 0
    for i in xrange(nrowsIN):
        temp = np.fromstring(f.readline(), dtype=float, sep=" ")
        # print '\n'
        # print temp
        if len(temp) != ncolsIN:
            raise ValueError(
                "input row " + str(i) + "(from top) has " + str(len(temp)) + " elements, but expected " + str(ncolsIN)
            )
        keepIndex = np.where(temp != NODATA_value)
        interimGrid[rowIndexOUT, keepIndex] += temp[keepIndex]
        rowDenominator[keepIndex] += 1
        aggCounter += 1

        if aggCounter == coarsenBy:

            # print '|-------------'
            # print interimGrid[rowIndexOUT,:]
            # print '--'
            # print rowDenominator
            # print '--'
            if aggregationType == "MEAN":
                if np.any(rowDenominator != 0):
                    interimGrid[rowIndexOUT, np.where(rowDenominator != 0)] /= rowDenominator[
                        np.where(rowDenominator != 0)
                    ]
            if NAoption == "IGNORE":
                interimGrid[rowIndexOUT, np.where(rowDenominator == 0)] = NODATA_value
            if NAoption == "PRESERVE":
                interimGrid[rowIndexOUT, np.where(rowDenominator < coarsenBy)] = NODATA_value
            # print interimGrid[rowIndexOUT,:]
            # print '-------------|'
            rowIndexOUT += 1
            aggCounter = 0
            rowDenominator = np.zeros(ncolsIN)

    # import pylab as pl
    # pl.imshow(interimGrid,interpolation='nearest',origin='upper')
    # pl.show()
    # from IPython.Debugger import Pdb
    # Pdb(color_scheme='Linux').set_trace()

    # now loop across columns of this interim grid and aggregate as we go
    aggCounter = 0
    colIndexIN = 0
    colIndexOUT = 0
    for i in xrange(ncolsIN):
        temp = interimGrid[:, colIndexIN]
        keepIndex = np.where(temp != NODATA_value)
        outgrid[keepIndex, colIndexOUT] += temp[keepIndex]
        colDenominator[keepIndex] += 1
        aggCounter += 1
        colIndexIN += 1
        if aggCounter == coarsenBy:
            if aggregationType == "MEAN":
                if np.any(colDenominator != 0):
                    outgrid[np.where(colDenominator != 0), colIndexOUT] /= colDenominator[np.where(colDenominator != 0)]
            if NAoption == "IGNORE":
                outgrid[np.where(colDenominator == 0), colIndexOUT] = NODATA_value
            if NAoption == "PRESERVE":
                outgrid[np.where(colDenominator < coarsenBy), colIndexOUT] = NODATA_value
            colIndexOUT += 1
            aggCounter = 0
            colDenominator = np.zeros(nrowsOUT)

    # print interimGrid[:,0:4:1]
    # print outgrid

    # from IPython.Debugger import Pdb
    # Pdb(color_scheme='Linux').set_trace()

    # export shrunken ascii
    exportAscii(outgrid, fpathout, headerDict)
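
A hedged usage sketch (paths are placeholders): aggregating a 1km grid to 5km by taking means, with NODATA cells ignored wherever any valid input cells exist. Note that only overlapOption="PROHIBITED" is currently implemented, so the input dimensions must divide exactly by coarsenBy:

coarsenAsciiGridRes("grid_1km.asc", "grid_5km.asc", coarsenBy=5,
                    aggregationType="MEAN", overlapOption="PROHIBITED",
                    NAoption="IGNORE")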
Example #8
    template = np.zeros(1850 * 1850).reshape(1850, 1850)
    template[0:annualmean_mean.shape[0]:1,
             0:annualmean_mean.shape[1]:1] = annualmean_mean

    plotMapPY(1 - template, flipVertical="FALSE", NODATA=1, titsuf="")

if EXPORTASCII is not "NULL":

    from map_utils import getAsciiheaderFromTemplateHDF5
    from map_utils import exportAscii

    mask = tb.openFile(lim5kmbnry_path)
    hdrDict = getAsciiheaderFromTemplateHDF5(lim5kmbnry_path)

    # export as ascii
    exportAscii(annualmean_mean,
                EXPORTASCII,
                hdrDict,
                mask=mask.root.data[:, :])

    #r.require('fields')
    #r.image_plot(annualmean_mean)

    #from IPython.Debugger import Pdb
    #Pdb(color_scheme='Linux').set_trace()

#r.segments(0.5,0.05,0.5,0.95)
#r.segments(0.4,0.05,0.4,0.95)
#r.segments(np.repeat(0.4,11),np.array([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95]),np.repeat(0.5,11),np.array([0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95]))