Example #1
    def preProcessWork_ForDownloadTypes(self, request):
        if (self.isDownloadJob == True):
            if (self.dj_OperationName == "download"):
                theJobID = None
                try:
                    self.logger.info(
                        "(" + self.name +
                        "):preProcessWork_ForDownloadTypes: Pre_Processing a Download Data Job. "
                        + str(request['uniqueid']))
                    theJobID = request['uniqueid']
                    outFileFolder = params.zipFile_ScratchWorkspace_Path + str(
                        theJobID) + "/"
                    extractTif.create_Scratch_Folder(outFileFolder)
                except Exception as e:
                    # Log the failure rather than silently swallowing it.
                    self.logger.warn(
                        "(" + self.name +
                        "):preProcessWork_ForDownloadTypes: Error creating the scratch folder for job "
                        + str(theJobID) + "  Exception Error Message: " + str(e))
            elif (self.dj_OperationName == "download_all_climate_datasets"):
                # Placeholder for download_all_climate_datasets operations; it is not yet clear whether this will be used here.
                pass
        else:
            # This is a statistical job; nothing to pre-process.
            return
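
Note: extractTif.create_Scratch_Folder is not shown in these examples. Below is a minimal standalone sketch of what such a helper might do, assuming it only needs to ensure the per-job scratch directory exists; the function name and path are illustrative, not this project's actual implementation.

import os

def create_scratch_folder_sketch(out_file_folder):
    # Ensure the per-job scratch directory (e.g. "<scratch_root>/<jobID>/") exists.
    # Tolerate the case where another worker already created it.
    try:
        os.makedirs(out_file_folder)
    except OSError:
        if not os.path.isdir(out_file_folder):
            raise

# Hypothetical usage, mirroring the path built in preProcessWork_ForDownloadTypes:
# create_scratch_folder_sketch("/some/scratch/workspace/" + str(theJobID) + "/")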
Example #2
    def postProcessWork_ForDownloadTypes(self, request):
        if (self.isDownloadJob == True):
            if (self.dj_OperationName == "download"):
                theJobID = None
                try:
                    self.logger.info(
                        "(" + self.name +
                        "):postProcessWork_ForDownloadTypes: Post_Processing a Download Data Job. "
                        + str(request['uniqueid']))
                    theJobID = request['uniqueid']

                    # Zip the files
                    zipFilePath, errorMessage = extractTif.zip_Extracted_Tif_Files_Controller(
                        theJobID)
                    if (errorMessage == None):
                        self.logger.info(
                            "(" + self.name +
                            "):postProcessWork_ForDownloadTypes: Tif files have been zipped to: "
                            + str(zipFilePath))
                    else:
                        self.logger.info(
                            "(" + self.name +
                            "):postProcessWork_ForDownloadTypes: ERROR ZIPPING TIF FILES.  errorMessage: "
                            + str(errorMessage))

                except Exception as e:
                    # Log the failure rather than silently swallowing it.
                    self.logger.warn(
                        "(" + self.name +
                        "):postProcessWork_ForDownloadTypes: Error while zipping the Tif files for job "
                        + str(theJobID) + "  Exception Error Message: " + str(e))
            elif (self.dj_OperationName == "download_all_climate_datasets"):
                # Placeholder for download_all_climate_datasets operations; it is not yet clear whether this will be used here.
                pass
        else:
            # This is a statistical job; nothing to post-process.
            return

        # Placeholder

        pass
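
Note: zip_Extracted_Tif_Files_Controller is also external to these examples. The sketch below illustrates the kind of zip step it appears to perform, assuming it simply bundles every .tif in the job's scratch folder and returns a (zipFilePath, errorMessage) pair as the caller above expects; the paths and naming here are hypothetical.

import os
import zipfile

def zip_extracted_tifs_sketch(scratch_root, job_id):
    folder = os.path.join(scratch_root, str(job_id))
    zip_path = os.path.join(scratch_root, str(job_id) + ".zip")
    try:
        zf = zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED)
        try:
            for name in os.listdir(folder):
                if name.lower().endswith(".tif"):
                    # Store each Tif at the archive root rather than with its full path.
                    zf.write(os.path.join(folder, name), arcname=name)
        finally:
            zf.close()
        return zip_path, None
    except Exception as e:
        return None, str(e)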
Example #3
def getDayValue(year, month, day, bounds, clippedmask, dataType,
                operationsType, polygon_Str_ToPass, uid):  # geometryToClip
    '''
    Compute a single day's value: for 'download' operations, write a clipped
    GeoTiff to the job's scratch folder; otherwise run the statistical operation.

    :param year: Year (int)
    :param month: Month (int)
    :param day: Day (int)
    :param bounds: Bounds tuple used to subset the H5 data (x_start, x_end, y_start, y_end)
    :param clippedmask: Boolean mask clipped to the bounds
    :param dataType: This is actually the datatype number (int)
    :param operationsType: Operation type number (index into params.parameters)
    :param polygon_Str_ToPass: GeoJSON polygon string (used by download jobs)
    :param uid: Unique ID of the entire job
    '''
    # print "Getting Day value ",year,month,day
    #Single item in one dimension
    #Calculate index for the day using 31 days in every month
    logger.debug("getDay Value year=" + str(year) + "  month=" + str(month) +
                 " day=" + str(day) + " datatype=" + str(dataType))

    # KS Refactor 2015 // This is where I'm intercepting the code to add the new 'download' operation at the worker thread level
    if (params.parameters[operationsType][1] == 'download'):
        # Do the download stuff
        #logger.debug("DataCalculator:getDayValue: TODO: Finish the code that creates a tif file from all the inputs we have here!")
        onErrorReturnValue = 0  # Returned on failure (1 is returned when the Tif file was created successfully).
        try:

            # Param Checking   (Compared to the test controller function in HDFDataToFile)
            theDataTypeNumber = dataType  # formerly 'theDataType'
            size = params.getGridDimension(int(theDataTypeNumber))
            geotransform, wkt = rp.getSpatialReference(int(theDataTypeNumber))
            theBounds = bounds  #mg.getPolyBoundsOnly(geoTrans,polygon):

            #polygon_Str_ToPass
            #geometry = geometryToClip # Had to pipe this one in as a new dictionary param from the head processor!!!
            geometry = geoutils.decodeGeoJSON(polygon_Str_ToPass)

            theYear = year  # Get this from param 'year'  (Passed in as part of a dictionary object)  (also applies for month, and day)
            theMonth = month
            theDay = day

            # Worker Section
            theStore = dStore.datastorage(theDataTypeNumber, theYear)
            theIndexer = params.dataTypes[theDataTypeNumber]['indexer']
            theFillValue = params.getFillValue(theDataTypeNumber)
            theIndex = theIndexer.getIndexBasedOnDate(theDay, theMonth,
                                                      theYear)

            hdf_Data_Array = None
            try:
                hdf_Data_Array = theStore.getData(theIndex, bounds=theBounds)

            except:

                firstErrorMessage = str(sys.exc_info())
                logger.debug(
                    "DataCalculator: Download Job ERROR getting data from H5 to hdf_Data_Array: We are inside 2 try/except blocks.  firstErrorMessage:  "
                    + str(firstErrorMessage) +
                    ",  Trying something crazy before bailing out!")
                # Last-ditch effort: let's work around the buggy h5py functions.
                try:
                    # This did not work... it actually caused a crash that looked worse than the other one.
                    #h5py._hl.selections._translate_slice = _ReplacementForFunc_translate_slice
                    #hdf_Data_Array = theStore.getData_AlternateH5PyFunc(theIndex, _ReplacementForFunc_translate_slice, bounds=theBounds)

                    # This did not work either, it ended up selecting inverse x range
                    #hdf_Data_Array = theStore.getData_AlternateH5PyFunc(theIndex, bounds=theBounds)
                    # Next attempt: get two sets of bounds, read two datasets, and then stitch them together.
                    # (A vectorized numpy equivalent of this stitching loop is sketched after this example.)
                    breakPoint = 0  # Column index at which the read is split into the two parts.
                    theBounds_Part1 = (theBounds[0], (breakPoint - 1),
                                       theBounds[2], theBounds[3])
                    theBounds_Part2 = (breakPoint, theBounds[1], theBounds[2],
                                       theBounds[3])
                    hdf_Data_Array_Part1 = theStore.getData(
                        theIndex, bounds=theBounds_Part1)
                    hdf_Data_Array_Part2 = theStore.getData(
                        theIndex, bounds=theBounds_Part2)
                    theHeight_Of_New_Array = hdf_Data_Array_Part1.shape[0]
                    theWidth_Of_New_Array = hdf_Data_Array_Part1.shape[
                        1] + hdf_Data_Array_Part2.shape[1]
                    stitchedData_Array = np.zeros(
                        shape=(theHeight_Of_New_Array, theWidth_Of_New_Array),
                        dtype=np.float32)
                    for currentRowIndex in range(0, theHeight_Of_New_Array):
                        tempRow = np.zeros(shape=(theWidth_Of_New_Array),
                                           dtype=np.float32)
                        for currValueIndex_1 in range(
                                0, hdf_Data_Array_Part1.shape[1]):
                            currentValue = hdf_Data_Array_Part1[
                                currentRowIndex][currValueIndex_1]
                            tempRow[currValueIndex_1] = currentValue
                        for currValueIndex_2 in range(
                                0, hdf_Data_Array_Part2.shape[1]):
                            currentValueIndex_2_Adjusted = currValueIndex_2 + hdf_Data_Array_Part1.shape[
                                1]
                            currentValue = hdf_Data_Array_Part2[
                                currentRowIndex][currValueIndex_2]
                            tempRow[
                                currentValueIndex_2_Adjusted] = currentValue
                        stitchedData_Array[currentRowIndex] = tempRow

                    # here goes...
                    hdf_Data_Array = stitchedData_Array
                except:
                    #e = sys.exc_info()[0]
                    # If this error keeps happening and can't be figured out, read HDFDataToFile line 138 for more detailed notes on this issue.
                    logger.debug(
                        "DataCalculator: Download Job ERROR getting data from H5 to hdf_Data_Array: We are inside 2 try/except blocks, and the second one failed..  firstErrorMessage:  "
                        + str(firstErrorMessage) + " System Error Message: " +
                        str(sys.exc_info()))
                    return onErrorReturnValue

            # Points processing from geometry value
            thePoints = geometry  # New Context for 'geometry'!
            theLats = []
            theLongs = []

            # Get the list of lats and longs from the geometry points
            for p in range(thePoints.GetPointCount()):
                theLats.append(thePoints.GetY(p))
                theLongs.append(thePoints.GetX(p))

            # Get the Min Longitude and Max Latitude (Top Left Corner)
            minLong = min(theLongs)
            maxLat = max(theLats)

            # Adjust the max lat and min long for negative values (Need to make sure this works for datatypes other than climate model outputs)
            adjusted_Min_Long = minLong
            adjusted_Max_Lat = maxLat
            if (minLong < 0):
                #adjusted_Min_Long = minLong + 360
                adjusted_Min_Long = minLong
            if (maxLat < 0):
                #adjusted_Max_Lat = abs(maxLat) + 90    # This line caused images selected below 0 lat to be in a very wrong position (off by 97 ish on one test)
                #adjusted_Max_Lat = abs(maxLat) - 90
                adjusted_Max_Lat = maxLat

            # This quick fix did not work well enough... need something better.
            ## Quick Fix for 'bug 3 pixels off by half a degree'
            #pixel_Resolution_X = 0.5   # grid[1]
            #if(adjusted_Min_Long < 180):
            #    adjusted_Min_Long = adjusted_Min_Long + ( - ( pixel_Resolution_X / 2) )
            #else:
            #    adjusted_Min_Long = adjusted_Min_Long + (   ( pixel_Resolution_X / 2) )
            #pixel_Resolution_Y = -0.5   # grid[5]
            #if(adjusted_Max_Lat > 0):
            #    adjusted_Max_Lat = adjusted_Max_Lat + ( - ( abs(pixel_Resolution_Y) / 2) )
            #else:
            #    adjusted_Max_Lat = adjusted_Max_Lat + (   ( abs(pixel_Resolution_Y) / 2) )

            # Outfile transform x,y positions set using the adjusted min long and max lat
            outTransform_xPos = adjusted_Min_Long
            outTransform_yPos = adjusted_Max_Lat

            # Need this later
            noData_Value = theFillValue
            bandName = 1

            fullDataset_GeoTransform = geotransform
            outFullGeoTransform = (outTransform_xPos,
                                   fullDataset_GeoTransform[1],
                                   fullDataset_GeoTransform[2],
                                   outTransform_yPos,
                                   fullDataset_GeoTransform[4],
                                   fullDataset_GeoTransform[5])

            fullDataset_Projection = wkt

            uniqueID = uid  # Entire Job ID

            # Process the filename
            outFileName = extractTif.get_Tif_FileOutName(
                theDataTypeNumber, theYear, theMonth, theDay)
            outFileFolder = params.zipFile_ScratchWorkspace_Path + str(
                uid) + "/"
            outFileFullPath = outFileFolder + outFileName

            #logger.debug("Alert: 1")

            #logger.debug("Alert: 2")

            # Get the output File size
            out_X_Size = hdf_Data_Array.shape[1]
            out_Y_Size = hdf_Data_Array.shape[0]

            # Get the gdal driver and create a blank output file
            theDriverFormat = "GTiff"
            theDriver = gdal.GetDriverByName(theDriverFormat)

            #logger.debug("Alert: 3")

            outDS = theDriver.Create(outFileFullPath, out_X_Size, out_Y_Size,
                                     1, GDT_Float32)

            #logger.debug("Alert: 4")

            # Get the image band and write the data array values to it.  Flush the Cache and set the NoDataValue (This is the step that writes data to the output file)
            outDataArray = hdf_Data_Array
            outBand = outDS.GetRasterBand(bandName)
            outBand.WriteArray(outDataArray, 0, 0)
            outBand.SetNoDataValue(noData_Value)
            outBand.FlushCache()

            #logger.debug("Alert: 5")

            # Set the projection and transform
            outDS.SetGeoTransform(outFullGeoTransform)
            outDS.SetProjection(fullDataset_Projection)

            # closes the dataset (Very important!)
            outDS = None

            #logger.debug("Alert: 6")

            # At this point a Tif file should exist in the zipfile scratch area (one is written each time this runs).

            # If we got this far, return '1' as a way to signal that it all worked and the current Tif file should be created.
            return 1
        except:
            # Something went wrong.
            logger.debug(
                "DataCalculator: Download Job ERROR: Not sure what went wrong... System Error Message: "
                + str(sys.exc_info()))
            return onErrorReturnValue

        # Fallback return (normally unreachable; both paths above already return 1 or onErrorReturnValue).
        return onErrorReturnValue
    else:

        # Normal Statistical operations
        mathoper = pMath.mathOperations(
            operationsType, 1, params.dataTypes[dataType]['fillValue'], None)
        try:
            store = dStore.datastorage(dataType, year)

            #logger.debug("DataCalculator Alert A")

            indexer = params.dataTypes[dataType]['indexer']

            #logger.debug("DataCalculator Alert B")

            fillValue = params.getFillValue(dataType)

            #logger.debug("DataCalculator Alert C")

            index = indexer.getIndexBasedOnDate(day, month, year)

            #logger.debug("DataCalculator Alert D")

            # This fix worked for the downloads... let's see if it works here too!
            array_H5Data = None
            try:
                array_H5Data = store.getData(index, bounds=bounds)
                logger.debug("BBBBBBBBBBBBB")
            except:
                firstErrorMessage = str(sys.exc_info())
                logger.debug(
                    "DataCalculator: Statistics Job ERROR getting data from H5 to array_H5Data: We are inside 2 try/except blocks.  firstErrorMessage:  "
                    + str(firstErrorMessage) +
                    ",  Trying something crazy before bailing out!")
                # Last-ditch effort: let's work around the buggy h5py functions.
                try:
                    # Vars we need in here..
                    theBounds = bounds
                    theStore = store
                    theIndex = index

                    # Stitch the two arrays together
                    breakPoint = 0
                    theBounds_Part1 = (theBounds[0], (breakPoint - 1),
                                       theBounds[2], theBounds[3])
                    theBounds_Part2 = (breakPoint, theBounds[1], theBounds[2],
                                       theBounds[3])
                    hdf_Data_Array_Part1 = theStore.getData(
                        theIndex, bounds=theBounds_Part1)
                    hdf_Data_Array_Part2 = theStore.getData(
                        theIndex, bounds=theBounds_Part2)
                    theHeight_Of_New_Array = hdf_Data_Array_Part1.shape[0]
                    theWidth_Of_New_Array = hdf_Data_Array_Part1.shape[
                        1] + hdf_Data_Array_Part2.shape[1]
                    stitchedData_Array = np.zeros(
                        shape=(theHeight_Of_New_Array, theWidth_Of_New_Array),
                        dtype=np.float32)
                    for currentRowIndex in range(0, theHeight_Of_New_Array):
                        tempRow = np.zeros(shape=(theWidth_Of_New_Array),
                                           dtype=np.float32)
                        for currValueIndex_1 in range(
                                0, hdf_Data_Array_Part1.shape[1]):
                            currentValue = hdf_Data_Array_Part1[
                                currentRowIndex][currValueIndex_1]
                            tempRow[currValueIndex_1] = currentValue
                        for currValueIndex_2 in range(
                                0, hdf_Data_Array_Part2.shape[1]):
                            currentValueIndex_2_Adjusted = currValueIndex_2 + hdf_Data_Array_Part1.shape[
                                1]
                            currentValue = hdf_Data_Array_Part2[
                                currentRowIndex][currValueIndex_2]
                            tempRow[
                                currentValueIndex_2_Adjusted] = currentValue
                        stitchedData_Array[currentRowIndex] = tempRow

                    # here goes...
                    array_H5Data = stitchedData_Array
                    logger.debug(
                        "DataCalculator stitchedData_Array has been built.")
                    #logger.debug("DataCalculator Value of 'stitchedData_Array': " + str(stitchedData_Array))

                except:
                    logger.debug(
                        "DataCalculator: Statistics Job ERROR getting data from H5 to array_H5Data: We are inside 2 try/except blocks, and the second one failed.  The code will likely fail shortly.  firstErrorMessage:  "
                        + str(firstErrorMessage) + " System Error Message: " +
                        str(sys.exc_info()))

            #logger.debug("DataCalculator Alert E")

            logger.debug("DataCalculator.getDayValue : Value of 'index': " +
                         str(index))

            # ks note // understanding what's in the 'array' object
            #logger.debug("DataCalculator.getDayValue : Value of 'index': " + str(index))
            #logger.debug("DataCalculator.getDayValue : Value of 'array': " + str(array))
            #logger.debug("DataCalculator.getDayValue : Value of 'array': " + str(array))

            #mask = np.where((array_H5Data != fillValue) & (clippedmask == True))
            #
            #logger.debug("DataCalculator Alert F")
            #
            #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'clippedmask': " + str(clippedmask))
            #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'mask': " + str(mask))
            #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'array_H5Data': " + str(array_H5Data))
            #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'str(len(mask[0]))': " + str(len(mask[0])))
            #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'str(len(mask[1]))': " + str(len(mask[1])))
            #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'str(array_H5Data.size)': " + str(array_H5Data.size))

            # Something in here breaks on Climate Datatypes that are found in the southern hemisphere
            #mathoper.addData(array_H5Data[mask])       # SOMETHING WRONG HERE!!
            mask = None
            try:
                mask = np.where((array_H5Data != fillValue)
                                & (clippedmask == True))

                logger.debug("DataCalculator Alert F")

                #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'clippedmask': " + str(clippedmask))
                #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'mask': " + str(mask))
                #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'array_H5Data': " + str(array_H5Data))
                #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'str(len(mask[0]))': " + str(len(mask[0])))
                #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'str(len(mask[1]))': " + str(len(mask[1])))
                #logger.debug("DataCalculator Alert F.debug: DataCalculator.getDayValue : Value of 'str(array_H5Data.size)': " + str(array_H5Data.size))

                # If the size of the mask is 0, raise an exception so the fallback below runs.
                if len(mask[0]) == 0:
                    logger.debug(
                        "DataCalculator Alert F.debug.raise: DataCalculator.getDayValue : Issue with len(mask[0]): it should NOT be 0.  Raising an exception to trigger the fallback..."
                    )
                    raise Exception("len(mask[0]) == 0")

                mathoper.addData(array_H5Data[mask])  # SOMETHING WRONG HERE!!
            except:
                logger.debug(
                    "DataCalculator Alert F.except.debug: Something went wrong with the normal process.."
                )
                # Build a fallback mask that selects every cell of the data array (as if the clipped mask were always True).
                # (A vectorized construction of this mask is sketched after this example.)
                sizeOfH5Data = array_H5Data.size  # ex: 24
                numOf_H5_Rows = array_H5Data.shape[0]  # ex: 3
                numOf_H5_Cols = array_H5Data.shape[1]  # ex: 8
                maskArray_1 = np.zeros(shape=(sizeOfH5Data), dtype=int)
                maskArray_2 = np.zeros(shape=(sizeOfH5Data), dtype=int)
                # Set the values of the arrays (note: range() excludes its stop value).
                for j in range(0, numOf_H5_Rows):
                    for i in range(0, numOf_H5_Cols):
                        current_Index = i + (
                            numOf_H5_Cols * j
                        )  # currentColumnIndex + (numOfColumns * currentRowIndex)
                        current_Value_Part_1 = j  # Just use the row index (this gives the repeating pattern we want).
                        current_Value_Part_2 = i  # The current column index; that pattern repeats for each row.
                        maskArray_1[current_Index] = current_Value_Part_1
                        maskArray_2[current_Index] = current_Value_Part_2
                fakeMask = (maskArray_1, maskArray_2)

                #logger.debug("DataCalculator Alert F.except.debug: DataCalculator.getDayValue : Value of 'fakeMask': " + str(fakeMask))

                # Let's try this again!
                mathoper.addData(array_H5Data[fakeMask])

            #logger.debug("DataCalculator Alert G")

            del mask
            del array_H5Data
            store.close()
            #logger.debug("DataCalculator Alert H")
            value = mathoper.getOutput()
            #logger.debug("DataCalculator Alert I")
            mathoper.cleanup()
            logger.debug("DataCalculator Alert J")
            return value
        except:
            e = sys.exc_info()[0]
            logger.debug(
                "DataCalculator.getDayValue : returning fill value.. 'mathoper.getFillValue()': "
                + str(mathoper.getFillValue()) + " System Error Message: " +
                str(e))
            return mathoper.getFillValue()
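
Note: two of the fallbacks in this example are written as explicit Python loops: the row-by-row stitching of the two H5 reads, and the 'fakeMask' that indexes every cell of the data array. The sketches below are vectorized numpy equivalents of those loops; they produce the same arrays, but they are untested against this codebase and are offered only as illustrations.

import numpy as np

def stitch_halves_sketch(part1, part2):
    # Equivalent of the row-by-row stitching loop: concatenate the two reads
    # side by side along the column (x) axis, as float32.
    return np.hstack((part1, part2)).astype(np.float32)

def full_coverage_mask_sketch(array_h5_data):
    # Equivalent of the 'fakeMask' construction: a (row_indices, col_indices)
    # pair that selects every cell, so array_h5_data[mask] yields the whole
    # array flattened in row-major order.
    return np.where(np.ones(array_h5_data.shape, dtype=bool))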
Example #4
    def __preProcessIncomingRequest__(self, request):
        try:
            if (params.DEBUG_LIVE == True):
                self.logger.info(
                    "(" + self.name +
                    "):__preProcessIncomingRequest__: params.DEBUG_LIVE is set to True.  There will be a lot of textual output for this run."
                )
            uniqueid = request['uniqueid']
            self.__insertProgressDb__(uniqueid)
            self.__write_JobStarted_To_DB__(
                uniqueid, str(request))  # Log when Job has started.

            #self.logger.info("Processing Request: "+uniqueid)
            self.logger.info("(" + self.name +
                             "):__preProcessIncomingRequest__: uniqueid: " +
                             str(uniqueid))

            datatype = request['datatype']

            begintime = request['begintime']
            endtime = request['endtime']
            intervaltype = request['intervaltype']

            operationtype = request[
                'operationtype']  # Original line (just get the operation param)
            # KS Refactor 2015 // Dirty override for download operations type.
            #self.mathop = pMath.mathOperations(operationtype,1,params.dataTypes[datatype]['fillValue'],None)
            #self.logger.info("("+self.name+"):__preProcessIncomingRequest__: DEBUG: About to do the DIRTY OVERRIDE! operationtype value: "+ str(operationtype))
            if (params.parameters[operationtype][1] == 'download'):
                # For download requests, set self.mathop to operation 0 (the 'max' operator) so the rest of the
                # pipeline works without refactoring a ton of code just to get this feature working (a proper refactor IS still needed!).
                self.mathop = pMath.mathOperations(
                    0, 1, params.dataTypes[datatype]['fillValue'], None)

                # Additional customized code for download jobs
                self.isDownloadJob = True
                self.dj_OperationName = "download"
            else:
                # Pass-through for all normal (statistical) requests.
                self.mathop = pMath.mathOperations(
                    operationtype, 1, params.dataTypes[datatype]['fillValue'],
                    None)
                self.isDownloadJob = False
                self.dj_OperationName = "NotDLoad"

            #self.logger.info("("+self.name+"):__preProcessIncomingRequest__: DEBUG: MADE IT PASSED THE DIRTY OVERRIDE! requestID: "+uniqueid)

            size = params.getGridDimension(int(datatype))
            dates = dproc.getListOfTimes(begintime, endtime, intervaltype)

            if (intervaltype == 0):
                dates = params.dataTypes[datatype]['indexer'].cullDateList(
                    dates)

            # KS Developer Note: The issue here is that I need to only cut simple rectangle-shaped images out of the data.
            # All I really need is the largest bounding box that encompasses all points (regardless of how complex the original polygon was).
            # Seems simple, right? :)  (A minimal sketch of this bounding-box idea appears after this function.)
            # The other part of this issue is that this only needs to happen on download data requests.  If I change the code for all requests, it becomes less efficient for stats type jobs.
            #self.logger.info("("+self.name+"):__preProcessIncomingRequest__: DEBUG ALERT: Right now, only user drawn polygons are supported for download requests.  Need to write a function that gets geometry values from features as well.. VERY IMPORTANT TODO BEFORE RELEASE!!")
            #geometry_ToPass = None
            polygon_Str_ToPass = None
            dataTypeCategory = params.dataTypes[datatype][
                'data_category']  #  == 'ClimateModel'

            geotransform, wkt = rp.getSpatialReference(int(datatype))
            self.logger.info(
                "*****************************code*****************************"
            )
            # User Drawn Polygon
            if ('geometry' in request):

                if (params.DEBUG_LIVE == True):
                    self.logger.info(
                        "(" + self.name +
                        "):__preProcessIncomingRequest__: DEBUG: GEOMETRY FOUND (POLYGON DRAWN BY USER)"
                    )

                # Get the polygon string
                polygonstring = request['geometry']

                # Process input polygon string
                geometry = geoutils.decodeGeoJSON(polygonstring)
                #geometry = geoutils.decodeGeoJSON(polygon_Str_ToPass)

                if (params.DEBUG_LIVE == True):
                    self.logger.debug(
                        "(" + self.name +
                        "):__preProcessIncomingRequest__ : polygonstring (request['geometry']) value: "
                        + str(polygonstring))

                # Needed for download types
                #polygon_Str_ToPass = polygonstring

                # IMPORTANT BEFORE RELEASING ALL DATA DOWNLOADS:
                # Running the if statement below breaks the mask generation...
                # The latest test shows that the CHIRPS dataset actually produces a working image,
                # and that seasonal forecasts do as well.
                # Let's see if there is a way to keep the mask on file downloads.


                # BillyZ Fixed this
                self.logger.info('*****dataTypeCategory:*****' +
                                 dataTypeCategory)
                if ((self.dj_OperationName == "download") |
                    (dataTypeCategory == 'ClimateModel') |
                    (dataTypeCategory == 'CHIRPS')):
                    #if(self.dj_OperationName == "download"):
                    #if((self.dj_OperationName == "download") | (dataTypeCategory == 'ClimateModel')| (dataTypeCategory == 'NDVI')):
                    self.logger.info(
                        "*****************************BYPASSED*****************************"
                    )
                    polygon_Str_ToPass = extractTif.get_ClimateDataFiltered_PolygonString_FromSingleGeometry(
                        geometry)

                    if (params.DEBUG_LIVE == True):
                        self.logger.debug(
                            "(" + self.name +
                            "):__preProcessIncomingRequest__ : polygon_Str_ToPass (request['geometry']) value: "
                            + str(polygon_Str_ToPass))

                    geometry = geoutils.decodeGeoJSON(polygon_Str_ToPass)
                    bounds, mask = mg.rasterizePolygon(geotransform, size[0],
                                                       size[1], geometry)
                else:
                    self.logger.info(
                        "*****************************Not bypassed*****************************"
                    )
                    polygon_Str_ToPass = polygonstring
                    bounds, mask = mg.rasterizePolygon(geotransform, size[0],
                                                       size[1], geometry)
                    #polygon_Str_ToPass = extractTif.get_ClimateDataFiltered_PolygonString_FromSingleGeometry(geometry)

                    if (params.DEBUG_LIVE == True):
                        self.logger.debug(
                            "(" + self.name +
                            "):__preProcessIncomingRequest__ : polygon_Str_ToPass (request['geometry']) value: "
                            + str(polygon_Str_ToPass))

                    #geometry = geoutils.decodeGeoJSON(polygon_Str_ToPass)
                    #bounds, mask = mg.rasterizePolygon(geotransform, size[0], size[1], geometry)

                # ks refactor // Getting geometry and bounds info.
                #geometry_ToPass = geometry

                if (params.DEBUG_LIVE == True):
                    self.logger.debug(
                        "(" + self.name +
                        "):__preProcessIncomingRequest__ : polygonstring (request['geometry']) value: "
                        + str(polygonstring))
                    self.logger.debug(
                        "(" + self.name +
                        "):__preProcessIncomingRequest__ : (user defined polygon) geometry value: "
                        + str(geometry))
                    self.logger.debug(
                        "(" + self.name +
                        "):__preProcessIncomingRequest__ : bounds value: " +
                        str(bounds))

            # User Selected a Feature
            elif ('layerid' in request):

                if (params.DEBUG_LIVE == True):
                    self.logger.info(
                        "(" + self.name +
                        "):__preProcessIncomingRequest__: DEBUG: LAYERID FOUND (FEATURE SELECTED BY USER)"
                    )

                layerid = request['layerid']
                featureids = request['featureids']
                geometries = sf.getPolygons(layerid, featureids)

                if (params.DEBUG_LIVE == True):
                    self.logger.debug(
                        "(" + self.name +
                        "):__preProcessIncomingRequest__ : (FeatureSelection) geometries value: "
                        + str(geometries))

                # For Download data types, convert all of the geometries into a single bounding box that covers them all.
                # RIGHT HERE!!
                #if(self.dj_OperationName == "download"):
                if ((self.dj_OperationName == "download") |
                    (dataTypeCategory == 'ClimateModel')):
                    # Convert all the geometries to the rounded polygon string, and then pass that through the system
                    polygonstring = extractTif.get_ClimateDataFiltered_PolygonString_FromMultipleGeometries(
                        geometries)
                    polygon_Str_ToPass = polygonstring
                    geometry = geoutils.decodeGeoJSON(polygonstring)
                    bounds, mask = mg.rasterizePolygon(geotransform, size[0],
                                                       size[1], geometry)

                else:

                    bounds, mask = mg.rasterizePolygons(
                        geotransform, size[0], size[1], geometries)

            # Break up date
            # Check for cached polygon
            # If no cached polygon exists rasterize polygon
            clippedmask = mask[bounds[2]:bounds[3], bounds[0]:bounds[1]]
            #self.logger.debug("("+self.name+"):__preProcessIncomingRequest__ : debug : Value of 'mask': " + str(mask))
            #self.logger.debug("("+self.name+"):__preProcessIncomingRequest__ : debug : Value of 'clippedmask': " + str(clippedmask))

            self.__writeMask__(uniqueid, clippedmask, bounds)

            del mask
            del clippedmask
            worklist = []
            for date in dates:
                workid = uu.getUUID()
                #workdict = {'uid':uniqueid,'workid':workid,'bounds':bounds,'datatype':datatype,'operationtype':operationtype, 'intervaltype':intervaltype}
                workdict = {
                    'uid': uniqueid,
                    'workid': workid,
                    'bounds': bounds,
                    'datatype': datatype,
                    'operationtype': operationtype,
                    'intervaltype': intervaltype,
                    'polygon_Str_ToPass': polygon_Str_ToPass
                }  #'geometryToClip':geometry_ToPass}
                if (intervaltype == 0):
                    workdict['year'] = date[2]
                    workdict['month'] = date[1]
                    workdict['day'] = date[0]
                    dateObject = dateutils.createDateFromYearMonthDay(
                        date[2], date[1], date[0])
                    workdict['isodate'] = dateObject.strftime(
                        params.intervals[0]['pattern'])
                    workdict['epochTime'] = dateObject.strftime("%s")
                    worklist.extend([workdict])
                elif (intervaltype == 1):
                    workdict['year'] = date[1]
                    workdict['month'] = date[0]
                    dateObject = dateutils.createDateFromYearMonth(
                        date[1], date[0])
                    workdict['isodate'] = dateObject.strftime(
                        params.intervals[0]['pattern'])
                    workdict['epochTime'] = dateObject.strftime("%s")
                    worklist.extend([workdict])
                elif (intervaltype == 2):
                    workdict['year'] = date
                    dateObject = dateutils.createDateFromYear(date)
                    workdict['isodate'] = dateObject.strftime(
                        params.intervals[0]['pattern'])
                    workdict['epochTime'] = dateObject.strftime("%s")
                    worklist.extend([workdict])
            # ks Refactor // Understanding how the work is distributed among worker threads.
            if (params.DEBUG_LIVE == True):
                self.logger.debug(
                    "(" + self.name +
                    "):__preProcessIncomingRequest__ : worklist array value: "
                    + str(worklist))

            return None, worklist
        except Exception as e:
            self.logger.warn(
                "(" + self.name +
                "):Error processing Request in HeadProcessor: uniqueid: " +
                str(uniqueid) + " Exception Error Message: " + str(e))
            return e, None
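
Note: the developer comments in __preProcessIncomingRequest__ say that, for download requests, all that is really needed is the largest bounding box that encompasses the polygon's points. The actual conversion is done by extractTif.get_ClimateDataFiltered_PolygonString_FromSingleGeometry (not shown here, and it also appears to apply climate-grid filtering). Below is only a minimal sketch of the bounding-box idea itself, assuming a geometry object that exposes GetPointCount/GetX/GetY like the one used in getDayValue; it is not the project's actual helper.

import json

def bounding_box_polygon_string_sketch(geometry):
    # Collect the longitudes (X) and latitudes (Y) of every point on the geometry.
    longs = [geometry.GetX(p) for p in range(geometry.GetPointCount())]
    lats = [geometry.GetY(p) for p in range(geometry.GetPointCount())]
    min_long, max_long = min(longs), max(longs)
    min_lat, max_lat = min(lats), max(lats)
    # Build a GeoJSON Polygon string for the enclosing rectangle (a closed ring).
    ring = [[min_long, max_lat], [max_long, max_lat],
            [max_long, min_lat], [min_long, min_lat],
            [min_long, max_lat]]
    return json.dumps({"type": "Polygon", "coordinates": [ring]})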
def _MonthlyRainfallAnalysis__make_SeasonalForecast_workList(
        uniqueid, request, datatype_uuid_for_CHIRPS,
        datatype_uuid_for_SeasonalForecast):
    worklist = []
    sub_type_name = 'SEASONAL_FORECAST'  # Choices for now are: 'CHIRPS_REQUEST' and 'SEASONAL_FORECAST'
    seasonal_start_date = request['seasonal_start_date']
    seasonal_end_date = request['seasonal_end_date']
    begintime = str(seasonal_start_date.split('_')[1]) + "/" + str(
        seasonal_start_date.split('_')[2]) + "/" + str(
            seasonal_start_date.split('_')[0])
    endtime = str(seasonal_end_date.split('_')[1]) + "/" + str(
        seasonal_end_date.split('_')[2]) + "/" + str(
            seasonal_end_date.split('_')[0])
    intervaltype = 0  # Daily
    operationtype = 5  # 5 == average, 0 == max, 1 == min
    placeholder_datatype = seasonalForecast_dataType_list[0]
    size = params.getGridDimension(int(placeholder_datatype))
    dates = dproc.getListOfTimes(begintime, endtime, intervaltype)
    if (intervaltype == 0):
        dates = params.dataTypes[placeholder_datatype]['indexer'].cullDateList(
            dates)

    # Iterate through all seasonalForecast dataTypes
    for seasonalForecast_dataType in seasonalForecast_dataType_list:
        datatype = seasonalForecast_dataType  # Much of the copy/paste code already references this as 'datatype'

        # PROCESS GEOMETRY STUFF NOW
        polygon_Str_ToPass = None
        geotransform, wkt = rp.getSpatialReference(int(datatype))
        if ('geometry' in request):
            # Get the polygon string
            polygonstring = request['geometry']
            # Process input polygon string
            geometry = geoutils.decodeGeoJSON(polygonstring)

            # # this IS a climate model type  --START
            polygon_Str_ToPass = extractTif.get_ClimateDataFiltered_PolygonString_FromSingleGeometry(
                geometry)
            geometry = geoutils.decodeGeoJSON(polygon_Str_ToPass)
            bounds, mask = mg.rasterizePolygon(geotransform, size[0], size[1],
                                               geometry)
            # # this IS a climate model type  --END

            # # this is not a download type or a climate model type  --START
            #polygon_Str_ToPass = polygonstring
            #bounds, mask = mg.rasterizePolygon(geotransform, size[0], size[1], geometry)
            # # this is not a download type or a climate model type  --END

            # Fail.. remove this code!
            # This IS a ClimateModel Type (Modeling code after existing code here)
            #polygon_Str_ToPass = polygonstring
            #polygon_Str_ToPass = extractTif.get_ClimateDataFiltered_PolygonString_FromSingleGeometry(geometry)
            #geometry = geoutils.decodeGeoJSON(polygon_Str_ToPass)
            #bounds, mask = mg.rasterizePolygon(geotransform, size[0], size[1], geometry)

        # User Selected a Feature
        elif ('layerid' in request):
            layerid = request['layerid']
            featureids = request['featureids']
            geometries = sf.getPolygons(layerid, featureids)

            # If we MUST have a polygon_Str_ToPass, uncomment the next two lines.
            # polygonstring = extractTif.get_ClimateDataFiltered_PolygonString_FromMultipleGeometries(geometries)
            # polygon_Str_ToPass = polygonstring

            # # this IS a download type or a climate model type --START
            polygonstring = extractTif.get_ClimateDataFiltered_PolygonString_FromMultipleGeometries(
                geometries)
            polygon_Str_ToPass = polygonstring
            geometry = geoutils.decodeGeoJSON(polygonstring)
            bounds, mask = mg.rasterizePolygon(geotransform, size[0], size[1],
                                               geometry)
            # # this IS a download type or a climate model type --END

            # # this is not a download type or a climate model type --START
            # The 'else' where it is NOT a seasonal forecast type.
            #bounds, mask = mg.rasterizePolygons(geotransform, size[0], size[1], geometries)
            # # this is not a download type or a climate model type --END

        # if no cached polygon exists rasterize polygon
        clippedmask = mask[bounds[2]:bounds[3], bounds[0]:bounds[1]]

        # TODO, Create System of multiple masks for the Monthly Analysis process.
        # self.__writeMask__(uniqueid, clippedmask, bounds)  # mst.writeHMaskToTempStorage(uid,array,bounds)
        # mst.writeHMaskToTempStorage(uniqueid,clippedmask,bounds)        # NEED TO FIND OUT HOW AND WHERE THIS IS USED IN THE DEEPER PROCESSING CODE, AND MAKE A SYSTEM THAT WILL ALLOW MORE THAN JUST ONE MASK..
        current_mask_uuid_for_SeasonalForecast = uu.getUUID()
        mst.writeHMaskToTempStorage(current_mask_uuid_for_SeasonalForecast,
                                    clippedmask, bounds)
        del mask
        del clippedmask

        # Build the worklist for each date in the dates
        for date in dates:
            workid = uu.getUUID()
            workdict = {
                'uid': uniqueid,
                'workid': workid,
                'datatype': datatype,
                'operationtype': operationtype,
                'intervaltype': intervaltype,
                'bounds': bounds,
                'polygon_Str_ToPass': polygon_Str_ToPass,
                'datatype_uuid_for_CHIRPS': datatype_uuid_for_CHIRPS,
                'datatype_uuid_for_SeasonalForecast':
                datatype_uuid_for_SeasonalForecast,
                'current_mask_and_storage_uuid':
                current_mask_uuid_for_SeasonalForecast,
                'sub_type_name': sub_type_name,
                'derived_product': True,
                'special_type': 'MonthlyRainfallAnalysis'
            }
            # Daily dates processing (intervaltype == 0, i.e. daily, in this case).
            workdict['year'] = date[2]
            workdict['month'] = date[1]
            workdict['day'] = date[0]
            dateObject = dateutils.createDateFromYearMonthDay(
                date[2], date[1], date[0])
            workdict['isodate'] = dateObject.strftime(
                params.intervals[0]['pattern'])
            workdict['epochTime'] = dateObject.strftime("%s")
            worklist.extend(
                [workdict]
            )  # Adds the entire workdict object to the worklist (equivalent to worklist.append(workdict)).

    return worklist
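
Note: the begintime/endtime construction above turns the request's seasonal date strings into the slash-separated form handed to dproc.getListOfTimes by calling split('_') repeatedly. A compact equivalent is sketched below, assuming the underscore-separated parts are year, month, day in that order (which the reordering in the code suggests); the helper name is hypothetical.

def seasonal_date_to_slash_format_sketch(date_str):
    # 'YYYY_MM_DD' -> 'MM/DD/YYYY'
    year, month, day = date_str.split('_')
    return month + "/" + day + "/" + year

# Hypothetical usage:
# begintime = seasonal_date_to_slash_format_sketch(request['seasonal_start_date'])
# endtime = seasonal_date_to_slash_format_sketch(request['seasonal_end_date'])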