Example #1
def Execute(self, parameters, messages):
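    """Logistic regression tool. Parameter layout, as read further below:

        parameters[0]    evidence raster layers (semicolon-delimited)
        parameters[1]    evidence types, one per raster, starting with 'o', 'f' or 'c'
        parameters[2]    weights tables, one per raster (semicolon-delimited)
        parameters[3]    training sites feature layer
        parameters[4]    missing-data value
        parameters[5]    unit cell area in sq km
        parameters[6]    output table for logistic regression results
        parameters[7]    output table for theme coefficients
        parameters[8-10] output post-probability, standard-deviation and T-value rasters
    """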
    gp = arcgisscripting.create()
    # Check out any necessary licenses
    gp.CheckOutExtension("spatial")

    gp.OverwriteOutput = 1
    gp.LogHistory = 1

    # Load required toolboxes...

    # Script arguments...
    try:
        unitCell = parameters[5].value
        CheckEnvironment()
        if unitCell < (float(gp.CellSize)/1000.0)**2:
            unitCell = (float(gp.CellSize)/1000.0)**2
            gp.AddWarning('Unit Cell area is less than area of Study Area cells.\n'+
                        'Setting Unit Cell to area of study area cells: %.0f sq km.'%unitCell)

        #Get evidence layer names
        Input_Rasters = parameters[0].valueAsText.split(';')
        #Remove group layer names 
        for i, s in enumerate(Input_Rasters):
            Input_Rasters[i] = arcpy.Describe(s.strip("'")).file
        gp.AddMessage("Input rasters: " + str(Input_Rasters))

        #Get evidence layer types
        Evidence_types = parameters[1].valueAsText.lower().split(';')
        gp.AddMessage('Evidence_types: %s'%(str(Evidence_types)))
        if len(Evidence_types) != len(Input_Rasters):
            gp.AddError("Not enough Evidence types!")
            raise Exception
        for evtype in Evidence_types:
            if not evtype[0] in 'ofc':
                gp.AddError("Incorrect Evidence type: %s"%evtype)
                raise Exception
        #Get weights tables names
        Wts_Tables = parameters[2].valueAsText.split(';')
        gp.AddMessage('Wts_Tables: %s'%(str(Wts_Tables)))
        if len(Wts_Tables) != len(Input_Rasters):
            gp.AddError("Not enough weights tables!")
            raise Exception
        #Get Training sites feature layer
        TrainPts = parameters[3].valueAsText
        gp.AddMessage('TrainPts: %s'%(str(TrainPts)))
        #Get missing data values
        MissingDataValue = parameters[4].valueAsText
        lstMD = [MissingDataValue for ras in Input_Rasters]
        gp.AddMessage('MissingDataValue: %s'%(str(MissingDataValue)))
        #Get output raster name
        thmUC = gp.createscratchname("tmp_UCras", '', 'raster',   gp.scratchworkspace)

        #Print out SDM environmental values
        sdmvalues.appendSDMValues(gp, unitCell, TrainPts)

        #Create Generalized Class tables
        Wts_Rasters = []
        mdidx = 0
        gp.AddMessage("Creating Generalized Class rasters.")
        for Input_Raster, Wts_Table in zip(Input_Rasters, Wts_Tables):
            Output_Raster = gp.CreateScratchName(os.path.basename(Input_Raster[:9]) + "_G", '', 'raster', gp.scratchworkspace)            
            gp.AddMessage('Output_Raster: %s'%(str(Output_Raster)))
        #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
            #++ Need to create in-memory Raster Layer for AddJoin
            RasterLayer = "OutRas_lyr"
            gp.makerasterlayer(Input_Raster, RasterLayer)
            gp.AddJoin_management(RasterLayer, "Value", Wts_Table, "CLASS")
            Temp_Raster = gp.CreateScratchName('temp_ras', '', 'raster',  gp.scratchworkspace)
            gp.AddMessage('Temp_Raster: %s'%(str(Temp_Raster)))
            gp.CopyRaster_management(RasterLayer, Temp_Raster)
            gp.Lookup_sa(Temp_Raster, "GEN_CLASS", Output_Raster)
            gp.delete(RasterLayer)
        #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            #gp.AddMessage(Output_Raster + " exists: " + str(gp.Exists(Output_Raster)))
            if not gp.Exists(Output_Raster):
                gp.AddError(Output_Raster + " does not exist.")
                raise Exception
            Wts_Rasters.append(gp.Describe(Output_Raster).CatalogPath)
        #Create the Unique Conditions raster from Generalized Class rasters
    ##    #>>>> Comment out for testing >>>>>>>>>>>>>>>>>>>>>>>>>>
        Input_Combine_rasters = ";".join(Wts_Rasters)
        #Combine created Wts_Rasters and add to TOC
        #gp.AddMessage('Combining...%s'%Input_rasters)
        if gp.exists(thmUC): gp.delete_management(thmUC)
        gp.Combine_sa(Input_Combine_rasters, thmUC)
    ##    #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        #gp.AddMessage('Combine done...')

        #Get UC lists from combined raster
        UCOIDname = gp.describe(thmUC).OIDfieldname
        #First get names of evidence fields in UC raster
        evflds = []
        ucflds = gp.ListFields(thmUC)
        ucfld = ucflds.Next()
        while ucfld:
            if (ucfld.Name == UCOIDname) or (ucfld.Name.upper() in ('VALUE', 'COUNT')):
            #if ucfld.Name == UCOIDname or ucfld.Name == 'Value' or ucfld.Name == 'Count':
                pass
            else:
                evflds.append(ucfld.Name)
            ucfld = ucflds.Next()
        #gp.AddMessage('evflds: %s'%str(evflds))
        #Set up empty list of lists
        lstsVals = [[] for fld in evflds]
        #gp.AddMessage('lstsVals: %s'%str(lstsVals))
        #Put UC vals and areas for each evidence layer in lstsVals
        cellSize = float(gp.CellSize)
        lstAreas = [[] for fld in evflds]
        if gp.describe(thmUC).datatype == 'RasterLayer':
            thmUCRL = gp.describe(thmUC).catalogpath
        else:
            thmUCRL = thmUC
        ucrows = workarounds_93.rowgen(gp.SearchCursor(thmUCRL))
        for ucrow in ucrows:
            for i, fld in enumerate(evflds):
                lstsVals[i].append(ucrow.GetValue(fld))
                lstAreas[i].append(ucrow.Count * cellSize * cellSize / (1000000.0 * unitCell))
        #gp.AddMessage('lstsVals: %s'%(str(lstsVals)))
        #gp.AddMessage('lstAreas: %s'%(str(lstAreas)))

        #Check Maximum area of conditions so not to exceed 100,000 unit areas
        #This is a limitation of Logistic Regression: sdmlr.exe
        maxArea = max(lstAreas[0])
        if (maxArea/unitCell)/100000.0 > 1:
            unitCell = math.ceil(maxArea/100000.0)
            gp.AddWarning('UnitCell is set to minimum %.0f sq. km. to avoid area limit in Logistic Regression!'%unitCell)

        #Get Number of Training Sites per UC Value
        #First Extract RasterValues to Training Sites feature layer
        #ExtrTrainPts = os.path.join(gp.ScratchWorkspace, "LRExtrPts.shp")
        #ExtrTrainPts = gp.CreateScratchName('LRExtrPts', 'shp', 'shapefile', gp.scratchworkspace)
        #gp.ExtractValuesToPoints_sa(TrainPts, thmUC, ExtrTrainPts, "NONE", "VALUE_ONLY")
        ExtrTrainPts = workarounds_93.ExtractValuesToPoints(gp, thmUC, TrainPts, "TPFID")
        #Make dictionary of Counts of Points per RasterValue
        CntsPerRasValu = {}
        tpFeats = workarounds_93.rowgen(gp.SearchCursor(ExtrTrainPts))
        for tpFeat in tpFeats:
            if tpFeat.RasterValu in CntsPerRasValu.keys():
                CntsPerRasValu[tpFeat.RasterValu] += 1
            else:
                CntsPerRasValu[tpFeat.RasterValu] = 1
        #gp.AddMessage('CntsPerRasValu: %s'%(str(CntsPerRasValu)))
        #Make Number of Points list in RasterValue order
        #Some rastervalues can have no points in them
        lstPnts = []
        numUC = len(lstsVals[0])
        for i in range(1, numUC+1): #Combined raster values start at 1
            if i in CntsPerRasValu.keys():
                lstPnts.append(CntsPerRasValu.get(i))
            else:
                lstPnts.append(0)
        #gp.AddMessage('lstPnts: %s'%(lstPnts))
        lstsMC = []
        mcIndeces = []
        for et in Evidence_types:
            if et.startswith('o'):
                mcIndeces.append(-1)
            elif et.startswith('f') or et.startswith('c'):
                mcIndeces.append(1)
            else:
                gp.AddError('Incorrect evidence type')
                raise Exception
        if len(mcIndeces) != len(Input_Rasters):
            gp.AddError("Incorrect number of evidence types.")
            raise Exception
        #gp.AddMessage('mcIndeces: %s'%(str(mcIndeces)))
        catMCLists = [[], mcIndeces]
        evidx = 0
        for mcIdx in mcIndeces:
            catMCLists[0].append([])
            if mcIdx<0:
                pass
            else:
                #Make a list of free raster values
                #evidx = len(catMCLists[0]) - 1
                #gp.AddMessage(Wts_Rasters[evidx])
                wts_g = gp.createscratchname("Wts_G")
                gp.MakeRasterLayer_management(Wts_Rasters[evidx], wts_g)
                #evrows = gp.SearchCursor("Wts_G")
                evrows = FloatRasterSearchcursor(gp, wts_g)
                #evrow = evrows.next()
                for evrow in evrows:
                    #gp.AddMessage("Value: %s"%evrow.value)
                    if evrow.Value not in catMCLists[0][evidx]:
                        catMCLists[0][evidx].append(evrow.Value)
                    #evrow = evrows.next()
            evidx += 1
        #gp.AddMessage('catMCLists: %s'%(catMCLists))
        lstWA = CalcVals4Msng(lstsVals, lstAreas[0], lstMD, catMCLists)
        #gp.AddMessage('lstWA: %s'%(str(lstWA)))
        ot = [['%s, %s'%(Input_Rasters[i], Wts_Tables[i])] for i in range(len(Input_Rasters))]
        #gp.AddMessage("ot=%s"%ot)
        strF2 = "case.dat"
        fnCase = os.path.join(arcpy.env.scratchFolder, strF2)
        fCase = open(fnCase, 'w')
        if not fCase :
            gp.AddError("Can't create 'case.dat'.")
            raise Exception
        nmbUC = len(lstsVals[0])
        getNmbET = True # True when first line of case.dat
        nmbET = 0 # Number of ET values in a line of case.dat
        #gp.AddMessage("Writing Logistic Regression input files...")
        ''' Reformat the labels for free evidence '''
        for j in range(len(lstsVals)):
            mcIdx = mcIndeces[j]
            if mcIdx > -1:
                listVals = catMCLists[0][j]
                #gp.AddMessage('listVals: %s'%(listVals))
                lstLV = listVals[:]
                lstLV = RemoveDuplicates(lstLV)
                elOT = ot[j]
                tknTF = elOT[0].split(',')
                strT = tknTF[0].strip()
                strF = tknTF[1].strip()
                first = True
               #gp.AddMessage("lstLV=%s"%lstLV)
                #gp.AddMessage("elOT=%s"%elOT)
                for lv in lstLV:
                    if lv == lstMD[j]: continue
                    if first:
                        elOT = ["%s (%s)"%(elOT[0], lv)]
                        first = False
                    else:
                        elOT.append("%s, %s (%s)"%(strT, strF, lv))
                    #gp.AddError("elOT=%s"%elOT)
                ot[j] = elOT
        #gp.AddMessage('ot=%s'%(str(ot)))
    ##' Loop through the unique conditions, substituting
    ##' the weighted average of known classes for missing data
    ##' and 'expanding' multi-class free data themes to
    ##' a series of binary themes
    ##'----------------------------------------------
        #gp.AddMessage('lstWA: %s'%lstWA)
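        #Each case.dat record written below holds, in fixed-width columns: the unique
        #condition ID, one value per ordered theme (weighted average substituted for
        #missing data), one 0/1 column per non-missing class of each free theme,
        #the number of training points, and the condition area in unit cells.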
        for i in range(nmbUC):
            numPoints = lstPnts[i]
    ##        #>>> This is a kluge for problem in case.dat for sdmlr.exe
    ##        if numPoints == 0: continue
    ##        #Fractional numpoints is not accepted
    ##        #This means that UC area had no extracted points,
    ##        #and should not be a case here.
    ##        #<<< End kluge for case.dat
            wLine = ""
            wLine = wLine + ('%-10d'%(i+1))
            j = 0
            for lst in lstsVals:
                missing = lstMD[j]
                theVal = lst[i]
                mcIdx = mcIndeces[j]
                if mcIdx < 0: #ordered evidence
                    if getNmbET: nmbET = nmbET + 1
                    if theVal == missing:
                        theVal = lstWA[j][0] #avgweighted
                    wLine = wLine + '%-20s'%theVal
                else: #free evidence
                    listVals = catMCLists[0][j]
                    #gp.AddMessage('catMCLists[%d]: %s'%(j, catMCLists[0][j]))
                    OFF = 0
                    ON = 1
                    if theVal == missing:
                        m=0
                        for v in listVals:
                            if v == missing:
                                continue
                            else:
                                #gp.AddMessage('lstWA[%d][%d]=%s'%(j, m, lstWA[j]))
                                valWA = lstWA[j][m]
                                wLine = wLine + '%-20s'%valWA
                                m += 1
                                if getNmbET: nmbET += 1
                    else:
                        for v in listVals:
                            if v == missing:
                                continue
                            elif getNmbET: nmbET += 1
                            if theVal == v:
                                wLine = wLine + '%-20s'%ON
                            else:
                                wLine = wLine + '%-20s'%OFF
                j += 1
            wLine = wLine + '%-10d'%numPoints
            theArea = lstAreas[0][i] / unitCell
            wLine = wLine + '%-20s' %theArea
            fCase.write(wLine + '\n')
            getNmbET = False
        fCase.close()
    ##' Write a parameter file to the ArcView extension directory
    ##'----------------------------------------------
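        #param.dat lists the scratch folder, the case file name, the number of evidence
        #columns and the unit cell area; it is presumably what sdmlr.exe reads below.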
        strF1 = "param.dat"
        fnParam = os.path.join(arcpy.env.scratchFolder, strF1) #param.dat file
        fParam = open(fnParam, 'w')
        if not fParam:
            gp.AddError("Error writing logistic regression parameter file.")
            raise Exception
        fParam.write('%s\\\n' %(arcpy.env.scratchFolder))
        fParam.write('%s\n' %strF2)
        fParam.write("%d %g\n" %(nmbET, unitCell))
        fParam.close()

    ### RunLR ------------------------------------------------------------------------------------------------------------
    #Check input files
        #Check input files exist
        #Paramfile = os.path.join(gp.scratchworkspace, 'param.dat')
        Paramfile = os.path.join(arcpy.env.scratchFolder, 'param.dat')
        if gp.exists(Paramfile):
            pass
            #gp.AddMessage("\nUsing the following input file in Logistic Regression: %s"%(Paramfile))
        else:
            gp.AddError("Logistic regression parameter file does not exist: %s"%Paramfile)
            raise Exception
        #Place input files folder in batch file
        #sdmlr.exe starts in input files folder.
        sdmlr = os.path.join(sys.path[0], 'bin', 'sdmlr.exe')
        if not os.path.exists(sdmlr):
            gp.AddError("Logistic regression executable file does not exist: %s"%sdmlr)
            raise Exception
        os.chdir(arcpy.env.scratchFolder)
        if os.path.exists('logpol.tba'): os.remove('logpol.tba')
        if os.path.exists('logpol.out'): os.remove('logpol.out')
        if os.path.exists('cumfre.tba'): os.remove('cumfre.tba')
        if os.path.exists('logco.dat'): os.remove('logco.dat')
        fnBat = os.path.join(arcpy.env.scratchFolder, 'sdmlr.bat')
        #fnBat = os.path.join( sys.path[0], 'sdmlr.bat')
        fBat = open(fnBat, 'w')
        #fBat.write("%s\n"%os.path.splitdrive(gp.ScratchWorkspace)[0])
        fBat.write("%s\n"%os.path.splitdrive(arcpy.env.scratchFolder)[0])
        fBat.write("CD %s\n"%os.path.splitdrive(arcpy.env.scratchFolder)[1])
        fBat.write('"%s"\n'%sdmlr)
        fBat.close()
        params = []
        try:
            #os.spawnv(os.P_WAIT, fnBat, params) # <==RDB  07/01/2010  replace with subprocess
            import subprocess
            gp.AddMessage('Running %s: '%fnBat)
            p = subprocess.Popen([fnBat] + params).wait()
        except OSError:
            gp.AddMessage('Execution failed %s: '%fnBat)

        if not os.path.exists('logpol.tba'):
            gp.AddError("Logistic regression output file %s\\logpol.tba does not exist.\n Error in case.dat or param.dat. "%arcpy.env.scratchFolder)
            raise Exception
        #gp.AddMessage("Finished running Logistic Regression")

    ###ReadLRResults -------------------------------------------------------------------------------------------------------
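    ## logpol.tba format, as parsed below: lines after the 'DATA' marker are
    ## whitespace-split and columns 1, 3, 4 and 5 give the unique condition ID,
    ## posterior probability, T-value and standard deviation.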

        thmuc = thmUC
        vTabUC = 'thmuc_lr'
        gp.MakeRasterLayer_management(thmuc, vTabUC)
        strFN = "logpol.tba"
        #strFnLR = os.path.join(gp.ScratchWorkspace, strFN)
        strFnLR = os.path.join(arcpy.env.scratchFolder, strFN)

        if not gp.Exists(strFnLR):
            gp.AddError("Reading Logistic Regression Results\nCould not find file: %s"%strFnLR)
            raise Exception('Existence error')
        #gp.AddMessage("Opening Logistic Regression Results: %s"%strFnLR)
        fLR = open(strFnLR, "r")
        if not fLR:
            gp.AddError("Input Error - Unable to open the file: %s for reading." %strFnLR)
            raise Exception('Open error')
        read = 0
        #fnNew = gp.GetParameterAsText(6)
        fnNew = parameters[6].valueAsText
        tblbn = os.path.basename(fnNew)
        [tbldir, tblfn] = os.path.split(fnNew)
        if tbldir.endswith(".gdb"):
            tblfn = tblfn[:-4] if tblfn.endswith(".dbf") else tblfn
            fnNew = fnNew[:-4] if fnNew.endswith(".dbf") else fnNew
            tblbn = tblbn[:-4] if tblbn.endswith(".dbf") else tblbn
        gp.AddMessage("fnNew: %s"%fnNew)
        gp.AddMessage('Making table to hold logistic regression results: %s'%fnNew)
        fnNew = tblbn
        print ("Table dir: ", tbldir);
        gp.CreateTable_management(tbldir, tblfn)
        print('Making table to hold logistic regression results: %s'%fnNew)
        fnNew = os.path.join(tbldir, fnNew)

        #To point to REAL table

        gp.AddField_management(fnNew, 'ID', 'LONG', 6)
        gp.AddField_management(fnNew, 'LRPostProb', 'Double', "#", "#", "#", "LR_Posterior_Probability")
        gp.AddField_management(fnNew, 'LR_Std_Dev', 'Double', "#", "#", "#", "LR_Standard_Deviation")
        gp.AddField_management(fnNew, 'LRTValue', 'Double', "#", "#", "#", "LR_TValue")
        gp.DeleteField_management(fnNew, "Field1")
        vTabLR = fnNew
        strLine = fLR.readline()
        vTabUCrows = workarounds_93.rowgen(gp.SearchCursor(vTabUC))
        #vTabUCrow = vTabUCrows.Next()
        ttl = 0
        #while vTabUCrow:
        for vTabUCrow in vTabUCrows:
            ttl += 1
        #gp.AddMessage("Reading Logistic Regression Results: %s"%strFnLR)
        vTabLRrows = gp.InsertCursor(vTabLR)
        while strLine:
            print(strLine)
            if strLine.strip() == 'DATA':
                read = 1
            elif read:
                vTabLRrow = vTabLRrows.NewRow()
                lstLine = strLine.split()
                if len(lstLine) > 5:
                    #gp.AddMessage('lstLine: %s'%lstLine)
                    vTabLRrow.SetValue("ID", int(lstLine[1].strip()))
                    vTabLRrow.SetValue("LRPostProb", float(lstLine[3].strip()))
                    vTabLRrow.SetValue("LR_Std_Dev", float(lstLine[5].strip()))
                    vTabLRrow.SetValue("LRTValue", float(lstLine[4].strip()))
                    vTabLRrows.InsertRow(vTabLRrow)
            strLine = fLR.readline()
        fLR.close()
        del vTabLRrow, vTabLRrows
        #gp.AddMessage('Created table to hold logistic regression results: %s'%fnNew)

    ##' Get the coefficients file
    ##'----------------------------------------------
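    ##' logco.dat format, as parsed below: rows after the 'pattern' header hold the
    ##' pattern number, coefficient and standard deviation; the first data row is the
    ##' constant term and the rest map onto the expanded labels in lstLabels.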
        strFN2 = "logco.dat"
        fnLR2 = os.path.join(arcpy.env.scratchFolder, strFN2)
    ##  ' Open file for reading
    ##  '----------------------------------------------
        #gp.AddMessage("Opening Logistic Regression coefficients Results: %s"%fnLR2)
        fLR2 = open(fnLR2, "r")
        read = 0
    ##  ' Expand object tag list of theme, field, value combos
    ##  '----------------------------------------------
        #gp.AddMessage('Expanding object tag list of theme, field, value combos')
        lstLabels = []
        for el in ot:
            for e in el:
                lstLabels.append(e.replace(' ', ''))
        #gp.AddMessage('lstLabels: %s'%lstLabels)
    ##  ' Make vtab to hold theme coefficients
    ##  '----------------------------------------------
        #fnNew2 = gp.GetParameterAsText(7)
        fnNew2 = parameters[7].valueAsText
        tblbn = os.path.basename(fnNew2)
        [tbldir, tblfn] = os.path.split(fnNew2)
        if tbldir.endswith(".gdb"):
            tblfn = tblfn[:-4] if tblfn.endswith(".dbf") else tblfn
            fnNew2 = fnNew2[:-4] if fnNew2.endswith(".dbf") else fnNew2
            tblbn = tblbn[:-4] if tblbn.endswith(".dbf") else tblbn
        fnNew2 = tblbn
        print ("Tabledir: ", tbldir);
        #gp.AddMessage('Making table to hold theme coefficients: %s'%fnNew2)
        print('Making table to hold theme coefficients: %s'%fnNew2)
        #fnNew2 = tbldir + "/" + fnNew2;
        fnNew2 = os.path.join(tbldir, fnNew2)
        gp.AddMessage('Making table to hold theme coefficients: %s'%fnNew2)
        gp.CreateTable_management(tbldir, tblfn)
        gp.AddField_management(fnNew2, "Theme_ID", 'Long', 6, "#", "#", "Theme_ID")
        gp.AddField_management(fnNew2, "Theme", 'text', "#", "#", 256, "Evidential_Theme")
        gp.AddField_management(fnNew2, "Coeff", 'double', "#", "#", "#", 'Coefficient')
        gp.AddField_management(fnNew2, "LR_Std_Dev", 'double', "#", "#", "#", "LR_Standard_Deviation")
        gp.DeleteField(fnNew2, "Field1")
        vTabLR2 = fnNew2
        strLine = fLR2.readline()
        i = 0
        first = 1
        #gp.AddMessage("Reading Logistic Regression Coefficients Results: %s"%fnLR2)
        vTabLR2rows = gp.InsertCursor(vTabLR2)
        print ("Starting to read LR_Coeff")
        while strLine:
            print ("Rdr:" , strLine);
            if len(strLine.split()) > 1:
                if strLine.split()[0].strip() == 'pattern':
                    read = 1
                    strLine = fLR2.readline()
                    continue
            if read:

                lstLine = strLine.split()
                if len(lstLine) > 2:
                    vTabLR2row = vTabLR2rows.NewRow()
                    #vTabLR2row.SetValue('Theme_ID', long(lstLine[0].strip())+1)
                    print ("Theme: ", lstLine[0].strip());
                    vTabLR2row.SetValue('Theme_ID', int(lstLine[0].strip())+1)
                    if not first:
                        try:
                            #For all but first...
                            lbl = lstLabels.pop(0)
                            print("Lbl:", lbl)
                            vTabLR2row.SetValue('Theme', lbl)
                        except IndexError:
                            gp.AddError('Evidence info %s not consistent with %s file'%(str(ot), fnLR2))
                        i = i+1
                    else:
                        vTabLR2row.SetValue('Theme', "Constant Value")
                        first = 0
                    print ("Coeff:", lstLine[1].strip());
                    vTabLR2row.SetValue("Coeff", float(lstLine[1].strip()))
                    print ("LR_std_dev:", lstLine[2].strip());
                    vTabLR2row.SetValue("LR_Std_Dev", float(lstLine[2].strip()))
                    vTabLR2rows.InsertRow(vTabLR2row)
                else:
                    break
            strLine = fLR2.readline()
        fLR2.close()
        if len(lstLabels) != 0:
            gp.AddError('Evidence info %s not consistent with %s file'%(str(ot), fnLR2))
        del vTabLR2row, vTabLR2rows
        #gp.AddMessage('Created table to hold theme coefficients: %s'%fnNew2)

        #Creating LR Response Rasters
        #Join LR polynomial table to unique conditions raster and copy
        #to get a raster with attributes
        cmb = thmUC
        cmbrl = 'cmbrl'
        gp.makerasterlayer_management(cmb, cmbrl)
        #tbl = gp.GetParameterAsText(6)
        tbl = parameters[6].valueAsText
        tbltv = 'tbltv'
        gp.maketableview_management(tbl, tbltv)
        gp.addjoin_management(cmbrl, 'Value', tbltv, 'ID')
        cmb_cpy = gp.createscratchname("cmb_cpy", '', 'raster', arcpy.env.scratchFolder)
        gp.copyraster_management(cmbrl, cmb_cpy)
        #Make output float rasters from attributes of joined unique conditions raster
        #outRaster1 = gp.GetParameterAsText(8)
        #outRaster2 = gp.GetParameterAsText(9)
        #outRaster3 =  gp.GetParameterAsText(10)
        outRaster1 =  parameters[8].valueAsText
        outRaster2 =  parameters[9].valueAsText
        outRaster3 =  parameters[10].valueAsText
        gp.addmessage("="*41+'\n'+"="*41)
        ##template = {'cmbrl':cmb_cpy}
        ##InExp = "CON(%(cmbrl)s.LRPOSTPROB >= 0, %(cmbrl)s.LRPOSTPROB, 0)"%template
        ##gp.SingleOutputMapAlgebra_sa(InExp, outRaster1)
        ##InExp = "CON(%(cmbrl)s.LR_STD_DEV >= 0, %(cmbrl)s.LR_STD_DEV, 0)"%template
        ##gp.SingleOutputMapAlgebra_sa(InExp, outRaster2)
        ##InExp = "CON(%(cmbrl)s.LRTVALUE >= 0, %(cmbrl)s.LRTVALUE, 0)"%template
        ##gp.SingleOutputMapAlgebra_sa(InExp, outRaster3) # <==RDB  07/01/2010
        # <==RDB  07/01/2010 -  SOMA expression is crashing in version 10. Changed to use Con tool.
        
        gp.Con_sa(cmb_cpy,cmb_cpy+".LRPOSTPROB",outRaster1,"0","LRPOSTPROB > 0")
        gp.Con_sa(cmb_cpy,cmb_cpy+".LR_STD_DEV",outRaster2,"0","LR_STD_DEV > 0")
        gp.Con_sa(cmb_cpy,cmb_cpy+".LRTVALUE",outRaster3,"0","LRTVALUE > 0")

        #Add t0 display
        #gp.SetParameterAsText(6, tbl)
        arcpy.SetParameterAsText(6,tbl)
        #gp.SetParameterAsText(7, gp.describe(vTabLR2).catalogpath)
        arcpy.SetParameterAsText(7, gp.describe(vTabLR2).catalogpath)
        #gp.SetParameterAsText(8, outRaster1)
        arcpy.SetParameterAsText(8, outRaster1)
        #gp.SetParameterAsText(9, outRaster2)
        arcpy.SetParameterAsText(9, outRaster2)
        #gp.SetParameterAsText(10, outRaster3)
        arcpy.SetParameterAsText(10, outRaster3)
    except arcpy.ExecuteError as e:
        arcpy.AddError("\n");
        arcpy.AddMessage("Caught ExecuteError in logistic regression. Details:");
        args = e.args[0];
        args.split('\n')
        arcpy.AddError(args);
        # get the traceback object
        tb = sys.exc_info()[2]
         # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        msgs = "Traceback\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        arcpy.AddError(msgs)
        raise 
    except Exception:
        # get the traceback object
        tb = sys.exc_info()[2]
        # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n    " + \
            str(sys.exc_info()[0]) + ": " + str(sys.exc_info()[1]) + "\n"
        # generate a message string for any geoprocessing tool errors
        msgs = "GP ERRORS:\n" + gp.GetMessages(2) + "\n"
        gp.AddError(msgs)
        # return gp messages for use with a script tool
        gp.AddError(pymsg)
        # print messages for use in Python/PythonWin
        print(pymsg)
        raise
def Calculate(self, parameters, messages):
    messages.addMessage("Starting Agterberg-Cheng CI Test")
    try:
        # FloatRasterSearchcursor below expects a geoprocessor object, as in Execute
        gp = arcgisscripting.create()
        PostProb = parameters[0].valueAsText
        PPStd =  parameters[1].valueAsText
        TrainSites =  parameters[2].valueAsText
        UnitArea =  parameters[3].value
        SaveFile =  parameters[4].valueAsText

        basename = os.path.basename(PostProb)
        sdmuc = basename.split("_")[0]
        #CellSize
        CellSize = float(arcpy.env.cellSize)
        #ExpNumTP = gp.GetCount(TrainSites) #Num of Selected sites
        result = arcpy.GetCount_management(TrainSites)
        ExpNumTP = int(result.getOutput(0))
        ConvFac = (CellSize**2)/1000000.0 / UnitArea
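        #Expected number of training points T = sum(posterior probability * cell count)
        #over the post-probability raster, converted from cells to unit areas via ConvFac.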
        TRasRows = FloatRasterSearchcursor(gp,PostProb)
        PredT = 0.0
        for TRasRow in TRasRows:
            PredT += (TRasRow.Value * TRasRow.Count)
        PredT *= ConvFac
        del TRasRow,TRasRows
        TRasRows = FloatRasterSearchcursor(gp,PPStd)
        TVar = 0.0
        for TRasRow  in TRasRows:
            TVar += (TRasRow.Value * TRasRow.Count * ConvFac )**2
        TStd = math.sqrt(TVar)
        del TRasRow,TRasRows
        TS = (PredT - ExpNumTP) / TStd
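        #One-tailed test statistic TS = (T - n) / std(T); ZtoF(TS) gives a cumulative
        #probability P, and overallCI rescales P to 100 when P = 50% (observed and
        #expected counts agree), falling toward 0 as P approaches 0% or 100%.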
        #PostProb
        n = ExpNumTP
        T = PredT
        #STD = TStd
        P = ZtoF(TS) *100.0
        if P>= 50.0: overallCI = 100.0 * (100.0 - P) / 50.0
        else: overallCI = 100.0 * (100.0 - (50 + (50 - P))) / 50.0

        Text = """
        Overall CI: %(0).1f%%\r
        Conditional Independence Test: %(1)s\r
        Observed No. training pts, n = %(2)i\r
        Expected No. of training points, T = %(3).1f\r
        Difference, T-n = %(4).1f\r
        Standard Deviation of T = %(5).3f\r
        \r
        ------------------------------------------------\r
        Conditional Independence Ratio: %(6).2f <simply the ratio n/T>\r
        Values below 1.00 may indicate conditional dependence\r
        among two or more of your data sets.  <Bonham-Carter(1994,ch.9)\r
        suggest that values <0.85 may indicate a problem>\r
        \r
        ------------------------------------------------\r
        Agterberg & Cheng Conditional Independence Test\r
        <See Agterberg and Cheng, Natural Resources Research 11(4), 249-255, 2002>\r
        This is a one-tailed test of the null hypothesis that T-n=0.  The test\r
        statistic is (T-n)/standard deviation of T. Probability values greater\r
        than 95%% or 99%% indicate that the hypothesis of CI should be rejected,\r
        but any value greater than 50%% indicates that some conditional\r
        dependence occurs>\r
        \r
        Probability that this model is not conditionally independent with\r
        (T-n)/Tstd = %(7).6f is %(8).1f%%\r
        ------------------------------------------------\r
        \r
        Input Data:\r
        Post Probability: %(9)s\r
        Post Probability Std Deviation: %(10)s\r
        Training Sites: %(11)s
        \r
        """ % {'0': overallCI, '1': sdmuc, '2':n, '3':T, '4':T-n, '5':TStd, '6':n/T, '7':TS, '8':ZtoF(TS)*100.0, '9':PostProb,
               '10':PPStd, '11':TrainSites}

        messages.addMessage(Text)

        if SaveFile:
            with open(SaveFile, "w") as fOut:
                fOut.write(Text)
            messages.addMessage("Text File saved: %s"%SaveFile)

    except Exception as Msg:
        # get the traceback object
        tb = sys.exc_info()[2]
        # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n    " + \
            str(sys.exc_type)+ ": " + str(sys.exc_value) + "\n"
        messages.addErrorMessage(pymsg)
        # print messages for use in Python/PythonWin
        print (pymsg)
        raise
def MissingDataVariance(gp, Wts_Rasters, PostProb, OutputName):
    ##    #gp.AddMessage("Args: %s"%sys.argv)
    ##    PostProb = gp.GetParameterAsText(1)
    CellSize = float(gp.CellSize)
    ##    OutputName = gp.GetParameterAsText(2)
    ##    #gp.AddMessage("Parameters: %s,%s,%s,%s" %(Wts_Rasters,OutputName,PostProb,CellSize))

    # Local variables...

    try:
        #gp.AddMessage("Args: %s"%sys.argv)
        #Local Variables.....
        i = 0

        #Create Total Missing Data Variance list
        TotClsVars = []

        #Loop through the Wts Rasters
        for Wts_Raster0 in Wts_Rasters:
            gp.AddMessage("Missing data Variance for: " + Wts_Raster0)
            Wts_Raster = gp.describe(Wts_Raster0).catalogpath
            TotDataArea = TotalAreaFromCounts(gp, Wts_Raster, CellSize)
            gp.AddMessage('TotDataArea = %.0f' % TotDataArea)

            #Start MD Variance raster
            #Get PostProb raster of MD cells
            R1 = os.path.join(gp.ScratchWorkspace, "R1")
            if gp.Exists(R1): gp.Delete(R1)
            Exp0 = "CON(%s == 0.0,%s,0.0)" % (Wts_Raster, PostProb)
            gp.AddMessage("R1=" + Exp0)
            gp.SingleOutputMapAlgebra_sa(Exp0, R1)
            #Get PostODDs raster of MD cells
            R2 = os.path.join(gp.ScratchWorkspace, "R2")
            if gp.Exists(R2): gp.Delete(R2)
            Exp = "%s / (1.0 - %s)" % (R1, R1)
            gp.AddMessage("R2=" + Exp)
            gp.SingleOutputMapAlgebra_sa(Exp, R2)
            gp.AddMessage("R2 exists: " + str(gp.Exists(R2)))

            #Get Total Variance of MD cells
            #Create total class variances list
            ClsVars = []

            #gp.AddMessage(gp.describe(Wts_Raster).DataType)
            ##        if gp.describe(Wts_Raster).DataType == 'RasterLayer':
            ##            theRaster = Wts_Raster
            ##        else:
            ##            theRaster = 'theRaster'
            ##            gp.MakeRasterLayer(Wts_Raster,theRaster)
            Wts_RasterRows = FloatRasterSearchcursor(gp, Wts_Raster)
            j = 0
            """ Cannot be done by single raster; must generate a raster for each value """
            for Wts_RasterRow in Wts_RasterRows:
                #???????????????????????????????????????????????
                if Wts_RasterRow.Value == 0.0:
                    continue
                #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
                ClsVar = str(
                    os.path.join(gp.ScratchWorkspace, "ClsVar%s%s" % (i, j)))
                j += 1
                if gp.Exists(ClsVar): gp.Delete(ClsVar)
                #gp.AddMessage("Weight=%s" % Wts_RasterRow.Value)
                Exp1 = 'CON(%s == 0.0,0.0,EXP(LN(%s) + %s))' % (
                    R2, R2, Wts_RasterRow.Value)
                Exp2 = "%s / (1 + %s)" % (Exp1, Exp1)
                ClsArea = float(
                    Wts_RasterRow.Count) * CellSize * CellSize / 1000000.0
                #gp.AddMessage("Class Area=%s" % ClsArea)
                Exp3 = "SQR(%s - %s) * (%s / %s)" % (Exp2, R1, ClsArea,
                                                     TotDataArea)
                #gp.AddMessage("ClsVar%s%s: "%(i,j) + Exp3)
                gp.SingleOutputMapAlgebra_sa(Exp3, ClsVar)
                #gp.AddMessage("Class Variance=%s" % ClsVar)
                ClsVars.append(str(ClsVar))  # Save the class variance raster

            del Wts_RasterRows
            #Sum the class variances
            TotClsVar = os.path.join(gp.ScratchWorkspace, "TotClsVar%s" % i)
            i += 1
            if gp.Exists(TotClsVar): gp.Delete(TotClsVar)
            Exp = "SUM%s" % str(tuple(ClsVars))
            #gp.AddMessage("TotClsVar%s: "%i+Exp)
            gp.SingleOutputMapAlgebra_sa(Exp, TotClsVar)
            TotClsVars.append(str(TotClsVar))

        #Create the Total Missing Data Variance raster once all Wts_Rasters are processed
        #(for-else: the else block runs after the loop finishes)
        else:
            if len(Wts_Rasters) > 0:
                TotVarMD = OutputName
                Exp = "SUM%s" % str(tuple(TotClsVars))
                #gp.AddMessage(OutputName + ": " + Exp)
                gp.SingleOutputMapAlgebra_sa(Exp, TotVarMD)

    #except Exception,Msg:  # This is invalid syntax #AL 150620
    except Exception:
        # get the traceback object
        tb = sys.exc_info()[2]
        # tbinfo contains the line number that the code failed on and the code from that line
        tbinfo = traceback.format_tb(tb)[0]
        # concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback Info:\n" + tbinfo + "\nError Info:\n    " + \
            str(sys.exc_type)+ ": " + str(sys.exc_value) + "\n"
        # generate a message string for any geoprocessing tool errors
        msgs = "GP ERRORS:\n" + gp.GetMessages(2) + "\n"
        gp.AddError(msgs)

        # return gp messages for use with a script tool
        gp.AddError(pymsg)

        # print messages for use in Python/PythonWin
        print(pymsg)  #AL 150620 added parenthesis to both lines
        print(msgs)