Example No. 1
def getEveryRecordForSingleDCAValue(featureClass, DCAValue, scratchWorkspace):
    '''Return a dictionary keyed on the historic ADS data's original
    OBJECTID, with values listing [TPA, DCA, HOST, DMG_TYPE, ACRES]
    for every record matching the given DCAValue.
    '''
    DCADict = {}
    outputTableName = 'DCA{}'.format(DCAValue)
    outputTableNamePath = os.path.join(scratchWorkspace, outputTableName)

    arcpy.TableSelect_analysis(
        featureClass, outputTableNamePath,
        'DCA1 = {0} OR DCA2 = {0} OR DCA3 = {0}'.format(DCAValue))

    convertNonDCAValuesToNull(outputTableName, DCAValue)

    for number in range(1, 4):
        fields = [
            'ADS_OBJECTID', 'TPA{}'.format(number), 'DCA{}'.format(number),
            'HOST{}'.format(number), 'DMG_TYPE{}'.format(number), 'ACRES'
        ]
        # Use the cursor as a context manager so it is released promptly
        with arcpy.da.SearchCursor(outputTableName, fields) as cursor:
            for row in cursor:
                if row[2] == DCAValue:
                    # setdefault collapses the original if/elif membership test
                    DCADict.setdefault(row[0], []).append(
                        [row[1], row[2], row[3], row[4], row[5]])

    return DCADict
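The convertNonDCAValuesToNull helper called above is not shown in this example. A minimal sketch of what it might do, assuming the numbered TPA/DCA/HOST/DMG_TYPE fields used by the caller (the body is an assumption, not the original implementation):

import arcpy

def convertNonDCAValuesToNull(table, DCAValue):
    # Hypothetical sketch: blank out the TPA/DCA/HOST/DMG_TYPE slots whose
    # DCA code does not match DCAValue, so only matching slots stay populated.
    for number in range(1, 4):
        fields = ['TPA{}'.format(number), 'DCA{}'.format(number),
                  'HOST{}'.format(number), 'DMG_TYPE{}'.format(number)]
        with arcpy.da.UpdateCursor(table, fields) as cursor:
            for row in cursor:
                if row[1] != DCAValue:
                    cursor.updateRow([None, None, None, None])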
Example No. 2
def fracciona_tabla(tabla, base):
    # Split a table: sources over 1,000,000 rows are written as two selections
    # ("_px_1" / "_px2_2"); smaller tables are copied whole.
    nombre_tabla = arcpy.Describe(tabla).name
    table_view = arcpy.MakeTableView_management(tabla,
                                                nombre_tabla).getOutput(0)
    if int(arcpy.GetCount_management(tabla).getOutput(0)) > 1000000:

        arcpy.TableSelect_analysis(in_table=table_view,
                                   out_table=os.path.join(
                                       base, nombre_tabla + "_px_1"),
                                   where_clause="OBJECTID <= 1000000")

        arcpy.TableSelect_analysis(in_table=table_view,
                                   out_table=os.path.join(
                                       base, nombre_tabla + "_px2_2"),
                                   where_clause="OBJECTID > 1000000")

    else:
        arcpy.TableSelect_analysis(in_table=table_view,
                                   out_table=os.path.join(base, nombre_tabla))
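A minimal usage sketch; both paths are illustrative, assuming file geodatabases:

# Hypothetical call: write <=1,000,000-row pieces of big_table into out.gdb
fracciona_tabla(r"C:\data\source.gdb\big_table", r"C:\data\out.gdb")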
Example No. 3
def load_table_in_memory():
    # Copy Rows
    arcpy.env.workspace = "in_memory"
    arcpy.CopyRows_management(All_ID_Mapping_Original_csv,
                              All_ID_Mapping_Original_Copy, "")

    # Table Select
    # NOTE: SQL evaluates AND before OR, so the original un-parenthesized
    # clause kept every row where Standard is NULL regardless of LegacyKey
    # length; parentheses below make the (likely intended) grouping explicit.
    if choose_standard == "B2":
        arcpy.TableSelect_analysis(
            All_ID_Mapping_Original_Copy, All_ID_Mapping_Sel_10_char,
            "(CHAR_LENGTH(\"LegacyKey\") = 10) AND ((\"Standard\" <> 'C') OR \"Standard\" IS NULL)"
        )
    else:
        arcpy.TableSelect_analysis(
            All_ID_Mapping_Original_Copy, All_ID_Mapping_Sel_10_char,
            "(CHAR_LENGTH(\"LegacyKey\") = 10) AND ((\"Standard\" <> 'B2') OR \"Standard\" IS NULL)"
        )

    # Load filter_codes_table to in_memory workspace
    arcpy.CopyRows_management(filter_codes_table, filter_codes_table_Copy, "")
Example No. 4
def tb_stats():
    outSheet = outWorkbook.add_worksheet(tb[0:30])
    outSheet.set_column(0, 4, 15)
    totalRows = arcpy.GetCount_management(tb)
    fields = arcpy.ListFields(tb)
    stats_fields = []
    output = "memory" + "\\" + str(tb)
    outSheet.write(0, 0, "NAME")
    outSheet.write(0, 1, tb)
    outSheet.write(1, 0, "TYPE")
    outSheet.write(1, 1, "Table")
    outSheet.write(2, 0, "GCS name")
    outSheet.write(2, 1, "N/A")
    outSheet.write(3, 0, "GCS type")
    outSheet.write(3, 1, "N/A")
    outSheet.write(4, 0, "ROWS")
    outSheet.write(4, 1, int(str(totalRows)))
    outSheet.write(5, 0, "FIELDS")
    outSheet.write(5, 1, int(str(len(fields))))
    outSheet.write(6, 0, "GEOM ERROR")
    outSheet.write(6, 1, "N/A")
    outSheet.write(8, 0, "FIELD")
    outSheet.write(8, 1, "ALIAS")
    outSheet.write(8, 2, "TYPE")
    outSheet.write(8, 3, "COUNT NULL")
    outSheet.write(8, 4, "COUNT BLANK")
    for i, field in enumerate(fields):
        if field.type not in ("OID", "Geometry"):
            outSheet.write(i + 8, 0, field.name)
            outSheet.write(i + 8, 1, field.aliasName)
            outSheet.write(i + 8, 2, field.type)
            stats_fields.append([field.name, "COUNT"])
        # Blank-string counts only make sense for non-numeric fields
        if field.type not in ("OID", "Geometry", "Double", "Integer",
                              "SmallInteger", "Single"):
            out_tb = "memory" + "\\" + str(tb) + "_" + str(field.name)
            expression = str(field.name) + " IN ('', ' ')"
            arcpy.TableSelect_analysis(tb, out_tb, expression)
            totalBlank = int(arcpy.GetCount_management(out_tb)[0])
            if totalBlank > 0:
                outSheet.write(i + 8, 4, totalBlank)
            arcpy.management.Delete(out_tb)
    arcpy.Statistics_analysis(tb, output, stats_fields)
    fieldsOutput = arcpy.ListFields(output)
    for i, field in enumerate(fieldsOutput):
        # Columns 0-1 of the Statistics output (OBJECTID, FREQUENCY) are skipped
        if i > 1:
            with SearchCursor(output, [field.name]) as cursor:
                for row in cursor:
                    outSheet.write(i + 7, 3, int(totalRows[0]) - row[0])
    arcpy.management.Delete(output)
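One detail worth noting in this example: arcpy geoprocessing tools return Result objects rather than plain values, which is why the code keeps round-tripping counts through int(str(...)). A small sketch of the two usual ways to unwrap a count (the table name is illustrative):

import arcpy

result = arcpy.GetCount_management("memory\\some_table")  # hypothetical table
row_count = int(result.getOutput(0))                      # explicit accessor
row_count = int(result[0])                                # shorthand indexing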
Example No. 5
def createTdlEnodeb():
    sourceTdlEnodebTable = ArcCatalogPath + "\\WangYouGis_Oracle97.sde\\WANGYOU.GIS_TDL_CM_ENODEB"
    targetTdlTemp = ArcCatalogPath + "\\" + GISDBPath + "\\WangYouSite.DBO.GIS_LTE_ENODEB_TEMP"
    targetTdlEnodebFeature = ArcCatalogPath + "\\" + GISDBPath + "\\WangYouSite.DBO.GIS_LTE_ENODEB" + yestday.strftime(
        '%Y%m%d')
    print "delete exists targetTdlEnodebFeature"
    if (arcpy.Exists(targetTdlEnodebFeature)):
        arcpy.Delete_management(targetTdlEnodebFeature, "FeatureClass")
    targetTdlEnodebFeature = arcpy.CreateFeatureclass_management(
        os.path.dirname(targetTdlEnodebFeature),
        os.path.basename(targetTdlEnodebFeature), "Point", targetTdlTemp,
        "DISABLED", "DISABLED", arcpy.SpatialReference(4326))
    print "筛选当天的LTE基站生成要素"
    logging.info("筛选当天的LTE基站生成要素")
    lteEnobedFields = arcpy.ListFields(sourceTdlEnodebTable)
    LTE_ENOBED_ALL = arcpy.TableSelect_analysis(
        sourceTdlEnodebTable, "in_memory/LTE_ENOBED_ALL",
        "TIME = TO_DATE('" + currentDoTimeStr + "','YYYY-MM-DD HH24:MI:SS')")
    createFeatureFromXY(LTE_ENOBED_ALL, "LONGITUDE", "LATITUDE",
                        targetTdlEnodebFeature, lteEnobedFields)
Example No. 6
def createGsmBts():
    sourceGsmBtsTable = ArcCatalogPath + "\\WangYouGis_Oracle97.sde\\WANGYOU.GIS_GSM_CM_BTS"
    targetGsmTemp = ArcCatalogPath + "\\" + GISDBPath + "\\WangYouSite.DBO.GIS_GSM_BTS_TEMP"
    targetGsmBtsFeature = ArcCatalogPath + "\\" + GISDBPath + "\\WangYouSite.DBO.GIS_GSM_BTS" + yestday.strftime(
        '%Y%m%d')
    print "delete exists targetGsmBtsFeature"
    if (arcpy.Exists(targetGsmBtsFeature)):
        arcpy.Delete_management(targetGsmBtsFeature, "FeatureClass")
    targetGsmBtsFeature = arcpy.CreateFeatureclass_management(
        os.path.dirname(targetGsmBtsFeature),
        os.path.basename(targetGsmBtsFeature), "Point", targetGsmTemp,
        "DISABLED", "DISABLED", arcpy.SpatialReference(4326))
    print "筛选当天的GSM基站生成要素"
    logging.info("筛选当天的GSM基站生成要素")
    gsmBtsFields = arcpy.ListFields(sourceGsmBtsTable)
    GSM_BTS_ALL = arcpy.TableSelect_analysis(
        sourceGsmBtsTable, "in_memory/GSM_BTS_ALL",
        "TIME = TO_DATE('" + currentDoTimeStr + "','YYYY-MM-DD HH24:MI:SS')")
    createFeatureFromXY(GSM_BTS_ALL, "LONGITUDE", "LATITUDE",
                        targetGsmBtsFeature, gsmBtsFields)
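Both of these functions delegate to a createFeatureFromXY helper that is not shown. A minimal sketch of such a helper, assuming it reads longitude/latitude columns from the selected table and inserts points plus matching attributes into the target feature class (only the name and call signature come from the examples; the body is an assumption):

import arcpy

def createFeatureFromXY(sourceTable, lonField, latField, targetFc, sourceFields):
    # Hypothetical sketch: copy the attributes shared by source and target,
    # building point geometry from the longitude/latitude columns.
    target_names = {f.name for f in arcpy.ListFields(targetFc)}
    common = [f.name for f in sourceFields
              if f.name in target_names
              and f.name not in (lonField, latField)
              and f.type not in ("OID", "Geometry")]
    with arcpy.da.SearchCursor(sourceTable, [lonField, latField] + common) as s_cur:
        with arcpy.da.InsertCursor(targetFc, ["SHAPE@XY"] + common) as i_cur:
            for row in s_cur:
                i_cur.insertRow([(row[0], row[1])] + list(row[2:]))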
Example No. 7
    def onClick(self):

        # Import the Excel table into ArcGIS
        arcpy.ExcelToTable_conversion(r'path\to\matriz_impacto.xls', r'path\to\save.dbf', 'nome')

        # Select locations [1, 2, 3, 4] and split the impacts by area
        for i in range(1, 5):

            arcpy.TableSelect_analysis('nome', r'path\to\split' + str(i) + '.dbf', '"Localizaca" = ' + str(i))

            # Count the number of rows in the created tables
            count = arcpy.GetCount_management('split' + str(i))

            # Copy the shapes for each location
            for j in range(0, int(count[0])):
                arcpy.CopyFeatures_management(['ADA_SIRGAS','AID_SIRGAS','AII_SIRGAS','AAR_SIRGAS'][i-1], r'path\to\copy'+str(i)+'_'+str(j), '#', '0', '0', '0')

                arcpy.TableSelect_analysis('split' + str(i), r'path\to\split'+str(i)+'_'+str(j), '"OID" = '+str(j))

                arcpy.JoinField_management('copy'+str(i)+'_'+str(j), 'Valor', 'split'+str(i)+'_'+str(j), 'LOCALIZACA', '#')

                arcpy.PolygonToRaster_conversion('copy'+str(i)+'_'+str(j), 'SIGNIFICAN', r'path\to\impacto'+str(i)+'_'+str(j)+'.tif', 'CELL_CENTER', 'NONE', '0,0001')
Example No. 8
def JoinImpacts(
    space, Dtlst, Flds, Sdbf
):  # Impact shapefiles are created here from forecast-date selections joined to the ZIP code shapefile
    OldZipFile = r"W://Accu_Internal_Resources//ArcMap_ArcGIS//Features//Shapefiles.gdb//USA_ZIPS_HD"
    CopyName = "ZIPCODES.shp"
    CopyZipFile = os.path.join(space, CopyName)
    env.workspace = space  # fixed typo: 'env.workpace' silently set a meaningless attribute
    ShpList = []
    for row in Dtlst:
        Name = 'Impact_' + row.replace('/', '_') + '.dbf'
        TempShp = 'Imp_' + row.replace('/', '_') + '.shp'
        Newshp = 'Imp_' + row.replace('/', '_') + '_prj.shp'
        NewImpact = os.path.join(env.workspace, Name)
        NewZipshp = os.path.join(env.workspace, Newshp)
        Where_Clause = '"DATES"' + '=' + "'" + row + "'"
        CopyShp = arcpy.CopyFeatures_management(OldZipFile, CopyZipFile)
        arcpy.TableSelect_analysis(Sdbf, NewImpact, Where_Clause)
        Location = space
        Join_Table = arcpy.JoinField_management(CopyShp, "POSTCODE", NewImpact,
                                                "POSTCODES", Flds)
        NewFields = [f.name for f in arcpy.ListFields(Join_Table)]
        print NewFields
        Feat_Clause = '"POSTCODES"' + '<>' + "' '"
        arcpy.FeatureClassToFeatureClass_conversion(Join_Table, Location,
                                                    TempShp, Feat_Clause)
        Newshp = arcpy.Project_management(
            TempShp, NewZipshp,
            r"W:\\Lamar_Projects\\Repository_Codes\\Temp\\WGS_1984_Web_Mercator_Auxiliary_Sphere.prj"
        )
        arcpy.Delete_management(Join_Table)
        DBFTables = arcpy.ListTables()
        ShpList.append(NewZipshp)

    print DBFTables  # NOTE: only bound inside the loop; NameError if Dtlst is empty
    print Flds
    return ShpList
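The Where_Clause above is built by hand for a DBF source. A hedged alternative using arcpy.AddFieldDelimiters, which picks the correct field delimiters for whatever workspace the table lives in (the path and date value are illustrative):

import arcpy

table = r"C:\temp\Impacts.dbf"  # hypothetical table path
row = "5/01/2020"               # illustrative DATES value
Where_Clause = "{} = '{}'".format(arcpy.AddFieldDelimiters(table, "DATES"), row)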
Example No. 9
def unique_values(table, field):
    with arcpy.da.SearchCursor(table, [field]) as cursor:
        return sorted({str(row[0]) for row in cursor})


myRPSUIDs = unique_values(pointFc, str(updateRPSUID))

for RPSUID in myRPSUIDs:
    arcpy.AddMessage(RPSUID)
    tbl = os.path.join("in_memory", "tbl")

    if arcpy.Exists(tbl):
        arcpy.Delete_management(tbl)
    # NOTE: assumes sourceRPSUID is a numeric field; a text field would need
    # the RPSUID value wrapped in single quotes
    arcpy.TableSelect_analysis(joinFc,
                               tbl,
                               where_clause=sourceRPSUID + ' = ' + RPSUID +
                               " and " + joinValFld + " " + whereClause)

    #create dictionary
    #Key: join field
    #Value: field with value to be transferred
    with arcpy.da.SearchCursor(tbl, [joinIdFld, joinValFld]) as cursor:
        valueDi = {key: val for key, val in cursor}

    # convert dictionary keys and values to update fc field types
    f1 = arcpy.ListFields(pointFc, wild_card=updateFld)[0]
    f2 = arcpy.ListFields(pointFc, wild_card=IdFld)[0]

    # convert dictionary values
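The snippet ends mid-step, but the comments and the two ListFields calls suggest the next move is casting the dictionary's keys and values to match the update feature class's field types. A hedged sketch of that step (the _cast helper is an assumption, not the original code):

def _cast(value, field_type):
    # Hypothetical helper: coerce a string to the target field's Python type
    if field_type in ("Integer", "SmallInteger", "OID"):
        return int(value)
    if field_type in ("Double", "Single"):
        return float(value)
    return str(value)

# Keys are matched against IdFld (f2); values feed updateFld (f1)
valueDi = {_cast(k, f2.type): _cast(v, f1.type) for k, v in valueDi.items()}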
Example No. 10
                                  out_name="FACILITIES_TABLE",
                                  out_path=defaultgdb)
    arcpy.management.DeleteIdentical(in_dataset="FACILITIES_TABLE",
                                     fields="FACILITY_ID")
    if arcpy.Exists("TCHD_ADDRESS"):
        arcpy.Delete_management("TCHD_ADDRESS")
    arcpy.TableToTable_conversion(in_rows=TCHD_ADDRESS,
                                  out_name="TCHD_ADDRESS",
                                  out_path=defaultgdb)

    ###############################################################################
    # Select rows missing latitude or longitude (these will be geocoded), and
    # separately the rows that already have coordinates, which feed the target
    # facilities feature class.
    # NOTE: the STREET_NAME pattern is uppercased here; the original mixed-case
    # '%Mobile%' could never match a value passed through Upper().
    arcpy.TableSelect_analysis(
        in_table="FACILITIES_TABLE",
        out_table="nullGeoRows",
        where_clause=
        "(GIS_LATITUDE IS NULL OR GIS_LONGITUDE IS NULL ) AND (Upper(NAME) NOT LIKE '%DO NOT USE%') AND (Upper(STREET_NAME) NOT LIKE '%MOBILE%')"
    )
    arcpy.TableSelect_analysis(
        in_table="FACILITIES_TABLE",
        out_table="FACILITIESw_GEOM",
        where_clause=
        "(GIS_LATITUDE IS NOT NULL) AND (GIS_LONGITUDE IS NOT NULL ) AND (Upper(NAME) NOT LIKE '%DO NOT USE%')"
    )

    ###############################################################################
    # GEOCODE NECESSARY ROWS
    #address_fields = "Street ConcatAddress;ZIP ZIP"
    address_fields = "'Street or Intersection' ConcatAddress VISIBLE NONE;'City or Placename' CITY VISIBLE NONE;State STATE VISIBLE NONE;'ZIP Code' ZIP VISIBLE NONE"
    if arcpy.Exists("GeocodedAddresses"):
        arcpy.Delete_management("GeocodedAddresses")
Example No. 11
def efficient_merge(feature_class_or_table_list, output_fc, filter=''):
    fc_count = len(feature_class_or_table_list)
    all_exist_test = all(
        arcpy.Exists(fct) for fct in feature_class_or_table_list)

    # EXECUTE
    # Start with FC containing largest extent to prevent spatial grid errors
    description = arcpy.Describe(feature_class_or_table_list[0])
    if description.dataType == "FeatureClass":
        descriptions = [
            arcpy.Describe(fc).extent for fc in feature_class_or_table_list
        ]
        fc_areas = [
            int(d.XMax - d.XMin) * int(d.YMax - d.YMin) for d in descriptions
        ]
        index = [i for i, x in enumerate(fc_areas) if x == max(fc_areas)]
        first_fc = feature_class_or_table_list[index[0]]
        feature_class_or_table_list.remove(first_fc)
    else:
        first_fc = feature_class_or_table_list.pop(0)

    # This is a fast and stable merge method for this number of features compared to arcpy Merge
    if all_exist_test:
        print(
            "Beginning merge of {} feature classes, copying first feature class to output..."
            .format(fc_count))
        if description.dataType == "FeatureClass":
            arcpy.Select_analysis(first_fc, output_fc, filter)
        else:
            arcpy.TableSelect_analysis(first_fc, output_fc, filter)
        arcpy.SetLogHistory(False)  # speeds up iterative updates by skipping geoprocessing history (the original assigned the attribute instead of calling the function)
        cursor_fields = list(arcpy.da.SearchCursor(output_fc, ['*']).fields)
        if description.dataType == "FeatureClass":
            cursor_fields.append('SHAPE@')
        insertRows = arcpy.da.InsertCursor(output_fc, cursor_fields)

        for fc in feature_class_or_table_list:
            counter = 0
            searchRows = arcpy.da.SearchCursor(fc, cursor_fields, filter)
            for searchRow in searchRows:
                insertRows.insertRow(searchRow)
                counter += 1
            try:
                del searchRow, searchRows
            except NameError:
                # searchRow was never bound: the filter eliminated every feature
                print(
                    "Merged NO features from {}; filter eliminated all features"
                    .format(fc))
            print("Merged {0} features from {1}".format(counter, fc))
        del insertRows
        arcpy.SetLogHistory(True)

        # Rebuild indexes
        try:
            arcpy.AddIndex_management(output_fc, 'Permanent_Identifier',
                                      'IDX_Permanent_Identifier')
            arcpy.AddIndex_management(output_fc, 'nhd_merge_id',
                                      'IDX_nhd_merge_id')
        except:
            arcpy.AddWarning(
                'Could not build Permanent_Identifier index because there is no such field.'
            )
        return arcpy.Describe(output_fc).catalogPath

    else:
        print(
            "ERROR: One or more feature class paths is not valid. Merged feature class not created."
        )
        return False
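A minimal usage sketch for efficient_merge; the paths and filter expression are illustrative. Note that the function removes the largest-extent input from the list in place, so pass a copy if the caller still needs the original list:

# Hypothetical inputs: merge three waterbody layers into one output
fcs = [r"C:\data\nhd1.gdb\Waterbody",
       r"C:\data\nhd2.gdb\Waterbody",
       r"C:\data\nhd3.gdb\Waterbody"]
out_path = efficient_merge(list(fcs), r"C:\data\merged.gdb\Waterbody_all",
                           filter="AreaSqKm >= 0.04")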
Example No. 12
        CellThiessYunHW = ArcCatalogPath + "\\" + SITEDBPath + "\\WangYouSite.dbo.GIS_RASTER_TSS_GSM_HW"
        CellThiessYunLL = ArcCatalogPath + "\\" + SITEDBPath + "\\WangYouSite.dbo.GIS_RASTER_TSS_GSM_LL"
        CellShanXing = ArcCatalogPath + "\\" + GISDBPath + "\\WangYouCellThiess.dbo.GIS_OBJECT_GSMSX" + yestday.strftime(
            '%Y%m%d')
        zhiBiaoTableMOrg = ArcCatalogPath + "\\" + DataSourcePath + "\\WANGYOU.HF_PM_CELL_DAY_2G_M"
        zhiBiaoTableWOrg = ArcCatalogPath + "\\" + DataSourcePath + "\\WANGYOU.HF_PM_CELL_DAY_2G_W"
        zhiBiaoTableGisM = ArcCatalogPath + "\\" + GISDBPath + "\\WangYouCellThiess.dbo.HF_PM_CELL_DAY_2G_M"
        zhiBiaoTableGisW = ArcCatalogPath + "\\" + GISDBPath + "\\WangYouCellThiess.dbo.HF_PM_CELL_DAY_2G_W"

        print "delete exists zhiBiaoTableGis"
        logging.info("delete exists zhiBiaoTableGis")
        if (arcpy.Exists(zhiBiaoTableGis)):
            arcpy.Delete_management(zhiBiaoTableGis)
        print "筛选当天的指标数据"
        arcpy.TableSelect_analysis(
            zhiBiaoTableOrg, zhiBiaoTableGis, "TIME_STAMP = TO_DATE('" +
            currentDoTimeStr + "','YYYY-MM-DD HH24:MI:SS')")
        if (curDay == 3):
            print "生成月平均"
            print "delete exists zhiBiaoTableGisM"
            logging.info("delete exists zhiBiaoTableGisM")
            if (arcpy.Exists(zhiBiaoTableGisM)):
                arcpy.Delete_management(zhiBiaoTableGisM)
            arcpy.TableSelect_analysis(
                zhiBiaoTableMOrg, zhiBiaoTableGisM, "TIME_STAMP = TO_DATE('" +
                lastMonthEnd.strftime('%Y-%m-01 00:00:00') +
                "','YYYY-MM-DD HH24:MI:SS')")
        if (curWeekDay == 3):
            print "生成周平均"
            print "delete exists zhiBiaoTableGisW"
            logging.info("delete exists zhiBiaoTableGisW")
Example No. 13
def mainFunction(updateFolder,fileName,updateMode,geodatabase): # Get parameters from ArcGIS Desktop tool by separating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
    try:
        # --------------------------------------- Start of code --------------------------------------- #
        
        # Get the arcgis version
        arcgisVersion = arcpy.GetInstallInfo()['Version']   

        # If a specific file is provided
        if (fileName):
            latestFile = os.path.join(updateFolder, fileName)
        # Otherwise get the latest file in a folder
        else:
            # Get the newest zip file from the update folder
            latestFile = max(glob.iglob(updateFolder + r"\*.zip"), key=os.path.getmtime)
      
        # Setup geodatabase to load data into in temporary workspace
        tempFolder = arcpy.CreateFolder_management(arcpy.env.scratchFolder, "WebData-" + str(uuid.uuid1()))
        arcpy.AddMessage("Copying datasets...")    
          
        # Extract the zip file to a temporary location
        zip = zipfile.ZipFile(latestFile, mode="r")
        zip.extractall(str(tempFolder))

        # Loop through the files in the extracted folder
        for file in os.listdir(str(tempFolder)):               
            # If it's a shapefile
            if file.endswith(".shp"):
               # Get count of the source dataset
               datasetCount = arcpy.GetCount_management(os.path.join(str(tempFolder), file))
               eachFeatureclass = file.replace(".shp","")
          
               # Check Dataset record count is more than 0
               if (long(str(datasetCount)) > 0):
                   # If update mode is New then copy, otherwise delete and append records
                   if (updateMode == "New"):                                           
                       # Logging
                       arcpy.AddMessage("Copying over feature class - " + os.path.join(geodatabase, eachFeatureclass) + "...")
                       if (enableLogging == "true"):
                          logger.info("Copying over feature class - " + os.path.join(geodatabase, eachFeatureclass) + "...")
                                
                       # Copy feature class into geodatabase using the same dataset name
                       arcpy.CopyFeatures_management(os.path.join(str(tempFolder), file), os.path.join(geodatabase, eachFeatureclass), "", "0", "0", "0")

                       # Get dataset count
                       datasetCount = arcpy.GetCount_management(os.path.join(geodatabase, eachFeatureclass)) 
                       arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                       if (enableLogging == "true"):
                           logger.info("Dataset record count - " + str(datasetCount))   
                   else:
                        # If dataset exists in geodatabase, delete features and load in new data
                        if arcpy.Exists(os.path.join(geodatabase, eachFeatureclass)):
                            # Logging
                            arcpy.AddMessage("Updating feature class - " + os.path.join(geodatabase, eachFeatureclass) + "...")
                            if (enableLogging == "true"):
                               logger.info("Updating feature class - " + os.path.join(geodatabase, eachFeatureclass) + "...")
             
                            arcpy.DeleteFeatures_management(os.path.join(geodatabase, eachFeatureclass))
                            arcpy.Append_management(os.path.join(str(tempFolder), file), os.path.join(geodatabase, eachFeatureclass), "NO_TEST", "", "")

                            # Get dataset count
                            datasetCount = arcpy.GetCount_management(os.path.join(geodatabase, eachFeatureclass)) 
                            arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                            if (enableLogging == "true"):
                               logger.info("Dataset record count - " + str(datasetCount))   
                        else:
                            # Log warning
                            arcpy.AddWarning("Warning: " + os.path.join(geodatabase, eachFeatureclass) + " does not exist. Copying over...")
                            # Logging
                            if (enableLogging == "true"):
                                logger.warning(os.path.join(geodatabase, eachFeatureclass) + " does not exist. Copying over...")
                                
                            # Copy feature class into geodatabase using the same dataset name
                            arcpy.CopyFeatures_management(os.path.join(str(tempFolder), file), os.path.join(geodatabase, eachFeatureclass), "", "0", "0", "0")           
               else:
                   arcpy.AddWarning("Dataset " + eachFeatureclass + " is empty and won't be copied...")                        
                   # Logging
                   if (enableLogging == "true"):
                       logger.warning("Dataset " + eachFeatureclass + " is empty and won't be copied...")
                               
            # If it's a FGDB
            if file.endswith(".gdb"):
                # Assign the geodatabase workspace (full path; the bare name
                # from os.listdir would not resolve) and load in the datasets
                arcpy.env.workspace = os.path.join(str(tempFolder), file)
                featureclassList = arcpy.ListFeatureClasses()   
                tableList = arcpy.ListTables()       
      
                # Load the feature classes into the geodatabase if at least one is in the geodatabase provided
                if (len(featureclassList) > 0):        
                    # Loop through the feature classes
                    for eachFeatureclass in featureclassList:
                       # Get count of the source dataset
                       datasetCount = arcpy.GetCount_management(eachFeatureclass)                   
                       # Check Dataset record count is more than 0
                       if (long(str(datasetCount)) > 0):
                           # Create a Describe object from the dataset
                           describeDataset = arcpy.Describe(eachFeatureclass)
                           # If update mode is New then copy, otherwise delete and append records
                           if (updateMode == "New"):                                           
                               # Logging
                               arcpy.AddMessage("Copying over feature class - " + os.path.join(geodatabase, eachFeatureclass) + "...")
                               if (enableLogging == "true"):
                                  logger.info("Copying over feature class - " + os.path.join(geodatabase, eachFeatureclass) + "...")
                                        
                               # Copy feature class into geodatabase using the same dataset name
                               arcpy.CopyFeatures_management(eachFeatureclass, os.path.join(geodatabase, describeDataset.name), "", "0", "0", "0")

                               # Get dataset count
                               datasetCount = arcpy.GetCount_management(os.path.join(geodatabase, describeDataset.name)) 
                               arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                               if (enableLogging == "true"):
                                   logger.info("Dataset record count - " + str(datasetCount))   
                           else:
                                # If dataset exists in geodatabase, delete features and load in new data
                                if arcpy.Exists(os.path.join(geodatabase, eachFeatureclass)):
                                    # Logging
                                    arcpy.AddMessage("Updating feature class - " + os.path.join(geodatabase, eachFeatureclass) + "...")
                                    if (enableLogging == "true"):
                                       logger.info("Updating feature class - " + os.path.join(geodatabase, eachFeatureclass) + "...")
                     
                                    arcpy.DeleteFeatures_management(os.path.join(geodatabase, eachFeatureclass))
                                    arcpy.Append_management(os.path.join(arcpy.env.workspace, eachFeatureclass), os.path.join(geodatabase, eachFeatureclass), "NO_TEST", "", "")

                                    # Get dataset count
                                    datasetCount = arcpy.GetCount_management(os.path.join(geodatabase, eachFeatureclass)) 
                                    arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                                    if (enableLogging == "true"):
                                       logger.info("Dataset record count - " + str(datasetCount))   
                                else:
                                    # Log warning
                                    arcpy.AddWarning("Warning: " + os.path.join(geodatabase, eachFeatureclass) + " does not exist. Copying over...")
                                    # Logging
                                    if (enableLogging == "true"):
                                        logger.warning(os.path.join(geodatabase, eachFeatureclass) + " does not exist. Copying over...")
                                        
                                    # Copy feature class into geodatabase using the same dataset name
                                    arcpy.CopyFeatures_management(eachFeatureclass, os.path.join(geodatabase, describeDataset.name), "", "0", "0", "0")           
                       else:
                           arcpy.AddWarning("Dataset " + eachFeatureclass + " is empty and won't be copied...")                        
                           # Logging
                           if (enableLogging == "true"):
                               logger.warning("Dataset " + eachFeatureclass + " is empty and won't be copied...")

                                                         
                if (len(tableList) > 0):    
                    # Loop through of the tables
                    for eachTable in tableList:
                       # Get count of the source dataset
                       datasetCount = arcpy.GetCount_management(eachTable)                   
                       # Check Dataset record count is more than 0
                       if (long(str(datasetCount)) > 0):
                           # Create a Describe object from the dataset
                           describeDataset = arcpy.Describe(eachTable)
                           # If update mode is New then copy, otherwise delete and append records
                           if (updateMode == "New"):
                               # Logging
                               arcpy.AddMessage("Copying over table - " + os.path.join(geodatabase, eachTable) + "...")
                               if (enableLogging == "true"):
                                  logger.info("Copying over table - " + os.path.join(geodatabase, eachTable) + "...")
                                  
                               # Copy table into geodatabase using the same dataset name
                               arcpy.TableSelect_analysis(eachTable, os.path.join(geodatabase, describeDataset.name), "")

                               # Get dataset count
                               datasetCount = arcpy.GetCount_management(os.path.join(geodatabase, describeDataset.name)) 
                               arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                               if (enableLogging == "true"):
                                   logger.info("Dataset record count - " + str(datasetCount))   
                           else:
                                # If dataset exists in geodatabase, delete features and load in new data
                                if arcpy.Exists(os.path.join(geodatabase, eachTable)):
                                    # Logging
                                    arcpy.AddMessage("Updating table - " + os.path.join(geodatabase, eachTable) + "...")
                                    if (enableLogging == "true"):
                                       logger.info("Updating table - " + os.path.join(geodatabase, eachTable) + "...")

                                    arcpy.DeleteRows_management(os.path.join(geodatabase, eachTable))  # tables need DeleteRows; DeleteFeatures only accepts feature classes/layers
                                    arcpy.Append_management(os.path.join(arcpy.env.workspace, eachTable), os.path.join(geodatabase, eachTable), "NO_TEST", "", "")

                                    # Get dataset count
                                    datasetCount = arcpy.GetCount_management(os.path.join(geodatabase, eachTable)) 
                                    arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                                    if (enableLogging == "true"):
                                       logger.info("Dataset record count - " + str(datasetCount))   
                                else:
                                    # Log warning
                                    arcpy.AddWarning("Warning: " + os.path.join(geodatabase, eachTable) + " does not exist. Copying over...")
                                    # Logging
                                    if (enableLogging == "true"):
                                        logger.warning(os.path.join(geodatabase, eachTable) + " does not exist. Copying over...")

                                    # Copy table into geodatabase using the same dataset name
                                    arcpy.TableSelect_analysis(eachTable, os.path.join(geodatabase, describeDataset.name), "")
                                    
        #################### Custom code for WCRC and BDC ####################
                           # For WCRC data updates
                           if "wcrc" in updateFolder.lower():
                               # For the property details view from WCRC
                               if "vw_propertydetails" in eachTable.lower():
                                   # Copy property details view into enterprise geodatabase
                                   arcpy.TableSelect_analysis(eachTable, os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vw_WCRCPropertyDetails"), "")
                                   # Copy property spatial view into file geodatabase and dissolve on valuation ID
                                   arcpy.AddMessage("Copying over feature class - " + os.path.join("D:\Data\WCRC.gdb", "Property") + "...")
                                   if (enableLogging == "true"):
                                      logger.info("Copying over feature class - " + os.path.join("D:\Data\WCRC.gdb", "Property") + "...") 
                                   arcpy.CopyFeatures_management(os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vwWCRCProperty"), os.path.join("D:\Data\WCRC.gdb", "PropertyParcel"), "", "0", "0", "0")
                                   arcpy.Dissolve_management(os.path.join("D:\Data\WCRC.gdb", "PropertyParcel"), os.path.join("D:\Data\WCRC.gdb", "Property"), "ValuationID", "", "MULTI_PART", "DISSOLVE_LINES")
                                   arcpy.JoinField_management(os.path.join("D:\Data\WCRC.gdb", "Property"), "ValuationID", os.path.join("D:\Data\WCRC.gdb", "PropertyParcel"), "ValuationID", "")

                                   # Get dataset count
                                   datasetCount = arcpy.GetCount_management(os.path.join("D:\Data\WCRC.gdb", "Property")) 
                                   arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                                   if (enableLogging == "true"):
                                       logger.info("Dataset record count - " + str(datasetCount))   
                           # For BDC data updates
                           if "bdc" in updateFolder.lower():                             
                               # For the property match table from BDC and WCRC
                               if "matchtable" in eachTable.lower():
                                   # Update the West Coast match table
                                   # WCRC match table - Copy table and tidy up the fields
                                   arcpy.TableSelect_analysis("D:\Data\FTP\WCRC\WCRCPropertyToParcel.csv", os.path.join("D:\Data\WCRC.gdb", "CoreLogic_PropertyToParcel"), "")
                                   arcpy.AddField_management(os.path.join("D:\Data\WCRC.gdb", "CoreLogic_PropertyToParcel"), "ValuationID", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
                                   arcpy.AddField_management(os.path.join("D:\Data\WCRC.gdb", "CoreLogic_PropertyToParcel"), "ParcelID", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
                                   arcpy.CalculateField_management(os.path.join("D:\Data\WCRC.gdb", "CoreLogic_PropertyToParcel"), "ValuationID", "!ValRef_Formatted!", "PYTHON_9.3", "")
                                   arcpy.CalculateField_management(os.path.join("D:\Data\WCRC.gdb", "CoreLogic_PropertyToParcel"), "ParcelID", "!Parcel_ID!", "PYTHON_9.3", "")
                                   arcpy.DeleteField_management(os.path.join("D:\Data\WCRC.gdb", "CoreLogic_PropertyToParcel"), "QPID;Roll;Assessment;Suffix;ValRef_Formatted;Apportionment;Category;Building_Floor_Area;Building_Site_Cover;Parcel_ID;Physical_Address;Physical_Suburb;Physical_City;Legal_Description")
                                       
                                   # BDC match table - Tidy up the fields
                                   arcpy.AddField_management(eachTable, "ValuationID", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
                                   arcpy.AddField_management(eachTable, "ParcelID", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
                                   arcpy.CalculateField_management(eachTable, "ValuationID", "!val_id!", "PYTHON_9.3", "")
                                   arcpy.CalculateField_management(eachTable, "ParcelID", "!PAR_ID!", "PYTHON_9.3", "")
                                   arcpy.DeleteField_management(eachTable, "PERIMETER;LEGAL_ID;PAR_ID;LEGAL;HOW;ASSESS;FLAG;COMMENT;POLYGONID;Edited_By;Edit_Date;Descriptio;OBJECTID_12;LEGAL_1;OBJECTID_12_13;val_id;val1;root_val_id;ra_unique_id;POINT_X;POINT_Y")
                                   # Copy out the WCRC match table
                                   arcpy.TableSelect_analysis(os.path.join("D:\Data\WCRC.gdb", "CoreLogic_PropertyToParcel"), "in_memory\\PropertyToParcel", "")
                                   # Join the Buller match table
                                   arcpy.JoinField_management("in_memory\\PropertyToParcel", "ValuationID", eachTable, "ValuationID", "ValuationID")
                                   # Select out the non-Buller records
                                   arcpy.TableSelect_analysis("in_memory\\PropertyToParcel", "in_memory\\PropertyToParcel_NoBDC", "ValuationID_1 IS NULL")
                                   # Merge Buller match table with the WCRC match table 
                                   arcpy.Merge_management("in_memory\\PropertyToParcel_NoBDC;" + eachTable, os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "PropertyToParcel"), "")
                                   arcpy.DeleteField_management(os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "PropertyToParcel"), "ValuationID_1")

                               # For the property view from BDC
                               if "vwproperty" in eachTable.lower():
                                   # Copy property view into enterprise geodatabase
                                   arcpy.TableSelect_analysis(eachTable, os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vw_BDCProperty"), "")
                                   # Copy property spatial view into file geodatabase and dissolve on valuation ID
                                   arcpy.AddMessage("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "Property") + "...")
                                   if (enableLogging == "true"):
                                       logger.info("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "Property") + "...")
                                   arcpy.CopyFeatures_management(os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vwBDCProperty"), os.path.join("D:\Data\BDC.gdb", "PropertyParcel"), "", "0", "0", "0")
                                   arcpy.Dissolve_management(os.path.join("D:\Data\BDC.gdb", "PropertyParcel"), os.path.join("D:\Data\BDC.gdb", "Property"), "ValuationID", "", "MULTI_PART", "DISSOLVE_LINES")
                                   arcpy.JoinField_management(os.path.join("D:\Data\BDC.gdb", "Property"), "ValuationID", os.path.join("D:\Data\BDC.gdb", "PropertyParcel"), "ValuationID", "")

                                   # Get dataset count
                                   datasetCount = arcpy.GetCount_management(os.path.join("D:\Data\BDC.gdb", "Property")) 
                                   arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                                   if (enableLogging == "true"):
                                       logger.info("Dataset record count - " + str(datasetCount))
                                       
                               # For the resource consent view from BDC
                               if "vwresourceconsent" in eachTable.lower():
                                   # Copy resource consent view into enterprise geodatabase
                                   arcpy.TableSelect_analysis(eachTable, os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vw_BDCResourceConsent"), "")
                                   # Copy resource consent spatial view into file geodatabase
                                   arcpy.AddMessage("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "ResourceConsent") + "...")
                                   if (enableLogging == "true"):
                                       logger.info("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "ResourceConsent") + "...")
                                   arcpy.CopyFeatures_management(os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vwBDCResourceConsent"), "in_memory\ResourceConsent", "", "0", "0", "0")
                                   arcpy.Dissolve_management("in_memory\ResourceConsent", os.path.join("D:\Data\BDC.gdb", "ResourceConsent"), "ConsentID", "", "MULTI_PART", "DISSOLVE_LINES")
                                   arcpy.JoinField_management(os.path.join("D:\Data\BDC.gdb", "ResourceConsent"), "ConsentID", "in_memory\ResourceConsent", "ConsentID", "")

                                    # Get dataset count
                                   datasetCount = arcpy.GetCount_management(os.path.join("D:\Data\BDC.gdb", "ResourceConsent")) 
                                   arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                                   if (enableLogging == "true"):
                                       logger.info("Dataset record count - " + str(datasetCount))
                                       
                               # For the building consent view from BDC
                               if "vwbuildingconsent" in eachTable.lower():
                                   # Copy building consent view into enterprise geodatabase
                                   arcpy.TableSelect_analysis(eachTable, os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vw_BDCBuildingConsent"), "")
                                   # Copy building consent spatial view into file geodatabase
                                   arcpy.AddMessage("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "BuildingConsent") + "...")
                                   if (enableLogging == "true"):
                                       logger.info("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "BuildingConsent") + "...")
                                   arcpy.CopyFeatures_management(os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vwBDCBuildingConsent"), "in_memory\BuildingConsent", "", "0", "0", "0")
                                   arcpy.Dissolve_management("in_memory\BuildingConsent", os.path.join("D:\Data\BDC.gdb", "BuildingConsent"), "ConsentID", "", "MULTI_PART", "DISSOLVE_LINES")
                                   arcpy.JoinField_management(os.path.join("D:\Data\BDC.gdb", "BuildingConsent"), "ConsentID", "in_memory\BuildingConsent", "ConsentID", "")

                                    # Get dataset count
                                   datasetCount = arcpy.GetCount_management(os.path.join("D:\Data\BDC.gdb", "BuildingConsent")) 
                                   arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                                   if (enableLogging == "true"):
                                       logger.info("Dataset record count - " + str(datasetCount))
                                       
                               # For the licence view from BDC
                               if "vwlicence" in eachTable.lower():
                                   # Copy licence view into enterprise geodatabase
                                   arcpy.TableSelect_analysis(eachTable, os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vw_BDCLicence"), "Valuation_No <> ''")
                                   # Copy licence spatial view into file geodatabase
                                   arcpy.AddMessage("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "Licence") + "...")
                                   if (enableLogging == "true"):
                                       logger.info("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "Licence") + "...")
                                   arcpy.CopyFeatures_management(os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vwBDCLicence"), "in_memory\Licence", "", "0", "0", "0")
                                   arcpy.Dissolve_management("in_memory\Licence", os.path.join("D:\Data\BDC.gdb", "Licence"), "LicenceNo", "", "MULTI_PART", "DISSOLVE_LINES")
                                   arcpy.JoinField_management(os.path.join("D:\Data\BDC.gdb", "Licence"), "LicenceNo", "in_memory\Licence", "LicenceNo", "")

                                    # Get dataset count
                                   datasetCount = arcpy.GetCount_management(os.path.join("D:\Data\BDC.gdb", "Licence")) 
                                   arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                                   if (enableLogging == "true"):
                                       logger.info("Dataset record count - " + str(datasetCount))
                                       
                                # For the LIM view from BDC
                               if "vwlim" in eachTable.lower():
                                   # Copy lim view into enterprise geodatabase
                                   arcpy.TableSelect_analysis(eachTable, os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vw_BDCLIM"), "")            
                                   # Copy lim spatial view into file geodatabase
                                   arcpy.AddMessage("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "LIM") + "...")
                                   if (enableLogging == "true"):
                                       logger.info("Copying over feature class - " + os.path.join("D:\Data\BDC.gdb", "LIM") + "...")
                                   arcpy.CopyFeatures_management(os.path.join("D:\Data\Database Connections\GISData@WCCHCGIS1 (gisadmin).sde", "vwBDCLIM"), "in_memory\LIM", "", "0", "0", "0")
                                   arcpy.Dissolve_management("in_memory\LIM", os.path.join("D:\Data\BDC.gdb", "LIM"), "RecordID", "", "MULTI_PART", "DISSOLVE_LINES")
                                   arcpy.JoinField_management(os.path.join("D:\Data\BDC.gdb", "LIM"), "RecordID", "in_memory\LIM", "RecordID", "")

                                   # Get dataset count
                                   datasetCount = arcpy.GetCount_management(os.path.join("D:\Data\BDC.gdb", "LIM")) 
                                   arcpy.AddMessage("Dataset record count - " + str(datasetCount))
                                   if (enableLogging == "true"):
                                       logger.info("Dataset record count - " + str(datasetCount))  
                       else:
                           arcpy.AddWarning("Dataset " + eachTable + " is empty and won't be copied...")                        
                           # Logging
                           if (enableLogging == "true"):
                               logger.warning("Dataset " + eachTable + " is empty and won't be copied...")             
        
        # --------------------------------------- End of code --------------------------------------- #
        # If called from gp tool return the arcpy parameter   
        if __name__ == '__main__':
            # Return the output if there is any ('output' is assumed to be set
            # earlier by the surrounding tool template)
            if output:
                # If ArcGIS desktop installed
                if (arcgisDesktop == "true"):
                    arcpy.SetParameter(1, output)
                # ArcGIS desktop not installed
                else:
                    return output 
        # Otherwise return the result          
        else:
            # Return the output if there is any
            if output:
                return output      
        # Logging
        if (enableLogging == "true"):
            # Log end of process
            logger.info("Process ended.")
            # Remove file handler and close log file        
            logMessage.flush()
            logMessage.close()
            logger.handlers = []
    # If arcpy error
    except arcpy.ExecuteError:           
        # Build and show the error message
        errorMessage = arcpy.GetMessages(2)   
        printMessage(errorMessage,"error")           
        # Logging
        if (enableLogging == "true"):
            # Log error          
            logger.error(errorMessage)
            # Log end of process
            logger.info("Process ended.")            
            # Remove file handler and close log file        
            logMessage.flush()
            logMessage.close()
            logger.handlers = []   
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)
    # If python error
    except Exception as e:
        errorMessage = ""         
        # Build and show the error message
        # If many arguments
        if (e.args):
            for i in range(len(e.args)):        
                if (i == 0):
                    # Python version check
                    if sys.version_info[0] >= 3:
                        # Python 3.x
                        errorMessage = str(e.args[i]).encode('utf-8').decode('utf-8')
                    else:
                        # Python 2.x
                        errorMessage = unicode(e.args[i]).encode('utf-8')
                else:
                    # Python version check
                    if sys.version_info[0] >= 3:
                        # Python 3.x
                        errorMessage = errorMessage + " " + str(e.args[i]).encode('utf-8').decode('utf-8')
                    else:
                        # Python 2.x
                        errorMessage = errorMessage + " " + unicode(e.args[i]).encode('utf-8')
        # Else just one argument
        else:
            errorMessage = e
        printMessage(errorMessage,"error")
        # Logging
        if (enableLogging == "true"):
            # Log error            
            logger.error(errorMessage)
            # Log end of process
            logger.info("Process ended.")            
            # Remove file handler and close log file        
            logMessage.flush()
            logMessage.close()
            logger.handlers = []   
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)            
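One small hardening note for the extraction step near the top of this function: the ZipFile handle is never closed. The standard-library context-manager form closes it automatically:

import zipfile

with zipfile.ZipFile(latestFile, mode="r") as zf:
    zf.extractall(str(tempFolder))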
Example No. 14
def mainFunction(featureClasses,tables,outputFolder,backupName,uniqueID): # Get parameters from ArcGIS Desktop tool by separating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
    try:
        # --------------------------------------- Start of code --------------------------------------- #
        # Setup geodatabase to load data into in temporary workspace
        if (uniqueID == "true"):
            backupName = backupName + "-" + str(uuid.uuid1())
        tempFolder = arcpy.CreateFolder_management(arcpy.env.scratchFolder, backupName)
        arcpy.CreateFileGDB_management(tempFolder, "Data", "CURRENT")
        geodatabase = os.path.join(str(tempFolder), "Data.gdb")

        arcpy.AddMessage("Copying datasets...")        
        # Load the feature classes and tables into a list if input values provided
        if (len(featureClasses) > 0):
            # Remove apostrophes and split the parameter string into a list
            featureclassList = str(featureClasses).replace("'", "").split(";")
            # Loop through the feature classes
            for eachFeatureclass in featureclassList:
               # Create a Describe object from the dataset
               describeDataset = arcpy.Describe(eachFeatureclass)
               # Copy feature class into geodatabase using the same dataset name
               arcpy.CopyFeatures_management(eachFeatureclass, os.path.join(geodatabase, describeDataset.name), "", "0", "0", "0")
                
        if (len(tables) > 0):    
            tableList = str(tables).replace("'", "").split(";")
            # Loop through of the tables
            for eachTable in tableList:
               # Create a Describe object from the dataset
               describeDataset = arcpy.Describe(eachTable)
               # Copy feature class into geodatabase using the same dataset name
               arcpy.TableSelect_analysis(eachTable, os.path.join(geodatabase, describeDataset.name), "")
      
        # Check input datasets are provided before zipping up
        # ('csvFiles' is not a parameter here; it is assumed to be a module-level global)
        if ((len(featureClasses) > 0) or (len(tables) > 0) or (len(csvFiles) > 0)):
            arcpy.AddMessage("Zipping data...")
            # Setup the zip file
            zipFile = os.path.join(outputFolder, backupName + ".zip")
            zippedFolder = zipfile.ZipFile(zipFile, "w", allowZip64=True)

            # Zip up the geodatabase
            root_len = len(os.path.abspath(str(tempFolder)))
            # For each of the directories in the folder
            for root, dirs, files in os.walk(str(tempFolder)):
              archive_root = os.path.abspath(root)[root_len:]
              # For each file
              for f in files:
                fullpath = os.path.join(root, f)
                archive_name = os.path.join(archive_root, f)
                zippedFolder.write(fullpath, archive_name)
            # Close zip file
            zippedFolder.close()        

        # --------------------------------------- End of code --------------------------------------- #
        # If called from gp tool return the arcpy parameter   
        if __name__ == '__main__':
            # Return the output if there is any
            if output:
                # If ArcGIS desktop installed
                if (arcgisDesktop == "true"):
                    arcpy.SetParameter(1, output)
                # ArcGIS desktop not installed
                else:
                    return output 
        # Otherwise return the result          
        else:
            # Return the output if there is any
            if output:
                return output      
        # Logging
        if (enableLogging == "true"):
            # Log end of process
            logger.info("Process ended.")
            # Remove file handler and close log file        
            logMessage.flush()
            logMessage.close()
            logger.handlers = []
    # If arcpy error
    except arcpy.ExecuteError:           
        # Build and show the error message
        errorMessage = arcpy.GetMessages(2)   
        printMessage(errorMessage,"error")           
        # Logging
        if (enableLogging == "true"):
            # Log error          
            logger.error(errorMessage)
            # Log end of process
            logger.info("Process ended.")            
            # Remove file handler and close log file        
            logMessage.flush()
            logMessage.close()
            logger.handlers = []   
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)
    # If python error
    except Exception as e:
        errorMessage = ""         
        # Build and show the error message
        # If many arguments
        if (e.args):
            for i in range(len(e.args)):        
                if (i == 0):
                    # Python version check
                    if sys.version_info[0] >= 3:
                        # Python 3.x
                        errorMessage = str(e.args[i]).encode('utf-8').decode('utf-8')
                    else:
                        # Python 2.x
                        errorMessage = unicode(e.args[i]).encode('utf-8')
                else:
                    # Python version check
                    if sys.version_info[0] >= 3:
                        # Python 3.x
                        errorMessage = errorMessage + " " + str(e.args[i]).encode('utf-8').decode('utf-8')
                    else:
                        # Python 2.x
                        errorMessage = errorMessage + " " + unicode(e.args[i]).encode('utf-8')
        # Else just one argument
        else:
            errorMessage = e
        printMessage(errorMessage,"error")
        # Logging
        if (enableLogging == "true"):
            # Log error            
            logger.error(errorMessage)
            # Log end of process
            logger.info("Process ended.")            
            # Remove file handler and close log file        
            logMessage.flush()
            logMessage.close()
            logger.handlers = []   
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)            
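Both handlers above rebuild the error message from e.args with the same Python 2/3 branching. A minimal sketch of how that could be factored into one version-agnostic helper (the function name is illustrative, not part of the original tool):

import sys

def build_error_message(e):
    # Join every exception argument into a single message string
    parts = []
    for arg in e.args:
        if sys.version_info[0] >= 3:
            # Python 3.x: str already yields unicode text
            parts.append(str(arg))
        else:
            # Python 2.x: encode unicode arguments as UTF-8 byte strings
            parts.append(unicode(arg).encode('utf-8'))
    return " ".join(parts) if parts else str(e)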
Example No. 15
 print "delete exists CellThiessCache"
 logging.info("delete exists CellThiessCache")
 if (arcpy.Exists(CellThiessCache)):
     arcpy.Delete_management(CellThiessCache, "FeatureClass")
 print "delete exists PointInCache"
 logging.info("delete exists PointInCache")
 if (arcpy.Exists(PointInCache)):
     arcpy.Delete_management(PointInCache, "FeatureClass")
 XiaoQuFields = arcpy.ListFields(infc)
 PointInCache = arcpy.CreateFeatureclass_management(
     os.path.dirname(PointInCache),
     os.path.basename(PointInCache), "Point", PointTemplate,
     "DISABLED", "DISABLED", arcpy.SpatialReference(4326))
 print "筛选室内站生成要素"
 ShiNeiCell_LTE_ALL = arcpy.TableSelect_analysis(
     infc, "in_memory/ShiNeiCell_LTE_ALL",
     "HONEYCOMB_TYPE='室内' and TIME_STAMP = TO_DATE('" +
     currentDoTimeStr + "','YYYY-MM-DD HH24:MI:SS')")
 createFeatureFromXY(ShiNeiCell_LTE_ALL, "LONGITUDE",
                     "LATITUDE", PointInCache, XiaoQuFields)
 print "生成室内站圆形"
 arcpy.Buffer_analysis(PointInCache, CellShanXing, "30 Meters",
                       "FULL", "ROUND", "NONE", "")
 CellThiessFinal = arcpy.CreateFeatureclass_management(
     os.path.dirname(CellThiessFinal),
     os.path.basename(CellThiessFinal), "Polygon",
     CellThiessFinalTemp, "DISABLED", "DISABLED",
     arcpy.SpatialReference(4326))
 arcpy.AddIndex_management(CellThiessFinal, "CI;CITY_NAME",
                           "CCNIndex", "NON_UNIQUE",
                           "NON_ASCENDING")
 arcpy.AddIndex_management(CellThiessFinal, "TIME_STAMP",
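createFeatureFromXY is a project helper that is not included in this snippet. A minimal sketch of what it plausibly does, assuming numeric WGS84 coordinate fields and matching attribute fields in the output feature class (all inferred, not taken from the source):

def createFeatureFromXY(table, x_field, y_field, out_fc, template_fields):
    # Carry over the plain attribute fields, skipping OID and geometry
    attr_names = [f.name for f in template_fields
                  if f.type not in ('OID', 'Geometry')]
    in_fields = [x_field, y_field] + attr_names
    with arcpy.da.InsertCursor(out_fc, ['SHAPE@XY'] + attr_names) as out_rows:
        for row in arcpy.da.SearchCursor(table, in_fields):
            # First two values are the coordinates, the rest are attributes
            out_rows.insertRow([(row[0], row[1])] + list(row[2:]))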
Example No. 16
Input_Join_Field = arcpy.GetParameterAsText(3)

Target_dataset = arcpy.GetParameterAsText(4)

Geometria = arcpy.GetParameterAsText(5)

Feature_Layer = arcpy.GetParameterAsText(6)

# Local variables:
Osite = Laskenta_puusto
Output_Table = Laskenta_puusto
Geomteria_join = Output_Table
Output_Features = Geomteria_join

# Process: Iterate Osite Values
arcpy.IterateFieldValues_mb(Laskenta_puusto, Osite_field, "String", "true", "false", "0")

# Process: Remove Join
arcpy.RemoveJoin_management("", "XFOREST_SOSLASPSTO_TableSele")

# Process: Table Select
arcpy.TableSelect_analysis(Laskenta_puusto, Output_Table, "\"%Osite field%\"=%Osite%")

# Process: Add Join
arcpy.AddJoin_management(Geometria, Input_Join_Field, Output_Table, Output_Join_Field, "KEEP_COMMON")

# Process: Append
arcpy.Append_management("''", Target_dataset, "TEST", "", "")
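The export above depends on ModelBuilder-only constructs (Iterate Field Values, the %Osite% inline variable, and empty placeholder inputs), so it will not run as a standalone script. A rough sketch of the equivalent loop in plain arcpy, reusing the model's variable names; Output_Join_Field comes from the model, while the NO_TEST append option is an assumption:

# Collect the distinct Osite values ourselves instead of Iterate Field Values
osite_values = sorted({row[0] for row in arcpy.da.SearchCursor(
    Laskenta_puusto, [Osite_field]) if row[0] is not None})

for osite in osite_values:
    # Select the rows for this Osite value
    where = "{0} = '{1}'".format(
        arcpy.AddFieldDelimiters(Laskenta_puusto, Osite_field), osite)
    arcpy.TableSelect_analysis(Laskenta_puusto, Output_Table, where)

    # Join the selection onto the geometry and append the matching features
    geom_lyr = arcpy.MakeFeatureLayer_management(Geometria, "geom_lyr")
    arcpy.AddJoin_management(geom_lyr, Input_Join_Field, Output_Table,
                             Output_Join_Field, "KEEP_COMMON")
    arcpy.Append_management(geom_lyr, Target_dataset, "NO_TEST")
    arcpy.RemoveJoin_management(geom_lyr)
    arcpy.Delete_management(geom_lyr)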

Example No. 17
def crashReportLRS(GDBspot, csv, fatalwt, seriouswt, nonseriouswt, possiblewt,
                   IntersectionThreshold, SegmentThreshold):
    # workspace= "Z:/fullerm/Safety Locations/Safety.gdb"

    # Input parameters
    # GCAT file/location
    GCATfile = csv  # csv created after mapping fields with schemaCleaner
    # Intermediate file/location?

    # Intersection polygon file/location
    # IntersectionFeatures = arcpy.GetParameterAsText(1)
    # IntersectionFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/Fed_Aid_2010_LucWoo_Intersection_Buffer_Dissolve"
    IntersectionFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/LMW_intersection_250ft_buffer_5Jul2017_3857"
    psFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/CS_IP_Merge_copy_clip_16june2017_LMW_3857"
    psThreshold = str(0)
    countyFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/County_FLOWHHMS_Clipped_3857"
    # Segment polygon file/location
    # SegmentFeatures = arcpy.GetParameterAsText(2)
    SegmentFeatures = "Z:/fullerm/Safety Locations/Crash_Report_Script_Tool.gdb/LMW_segments_70ft_buffer_5Jul2017_3857"
    # output file name/location for the spatial join
    # GDBspot = arcpy.GetParameterAsText(1)  # user input location for gdb and result excel tables

    # psThreshold = arcpy.GetParameterAsText(8)
    # output file name/location for excel table
    # TableFolder = arcpy.GetParameterAsText(4)
    # rdinv = arcpy.GetParameterAsText(8)
    rdinv = "C:/Users/fullerm/Documents/ArcGIS/Projects/Safety Report Script/Safety Report Script.gdb/Road_Inventory_CopyFeatures"
    # create geodatabase
    TimeDate = datetime.now()
    TimeDateStr = "CrashLocations" + TimeDate.strftime('%Y%m%d%H%M') + "_LRS"

    outputGDB = arcpy.CreateFileGDB_management(GDBspot, TimeDateStr)
    arcpy.env.workspace = str(outputGDB).replace('/', '\\')
    # I kept getting errors because ArcMap sets the field type from the first dozen features, and some IDs were numeric
    '''fldmppng = arcpy.FieldMappings()
    fldmppng.addTable(GCATfile)

    nameFI = fldmppng.findFieldMapIndex("LOCAL_REPORT_NUMBER_ID")
    fldmp = fldmppng.getFieldMap(nameFI)
    fld = fldmp.outputField

    fld.name = "LOCAL_REPORT_NUMBER_ID"
    fld.aliasName = "LOCAL_REPORT_NUMBER_ID"
    fld.type = "String"
    fldmp.outputField = fld
    fldmppng.replaceFieldMap(nameFI,fldmp)'''
    # convert GCAT txt file to gdb table and add to map
    NewTable = arcpy.TableToTable_conversion(GCATfile, outputGDB, "OHMI_data")
    arcpy.TableSelect_analysis(NewTable, "oh_table",
                               '"NLF_COUNTY_CD" <> \'Monroe\' ')
    ohTable = arcpy.CopyRows_management("oh_table", "ohTable")
    arcpy.TableSelect_analysis(NewTable, "mi_table",
                               '"NLF_COUNTY_CD" = \'Monroe\' ')
    miTable = arcpy.CopyRows_management('mi_table', 'miTable')
    # arcpy.SelectLayerByAttribute_management(NewTable,"CLEAR_SELECTION")
    rdlyr = arcpy.MakeFeatureLayer_management(rdinv, "rdlyr")
    rtloc = os.path.join(
        GDBspot, "Road_Inventory3456_CreateRoutes" + TimeDateStr + ".shp")
    lrs = arcpy.CreateRoutes_lr(rdlyr, "NLF_ID", rtloc, "TWO_FIELDS",
                                "CTL_BEGIN", "CTL_END")
    event_props = "NLFID POINT COUNTY_LOG_NBR"
    PointFile = arcpy.MakeRouteEventLayer_lr(lrs, "NLF_ID", ohTable,
                                             event_props, "Crash_Events")
    # creating this extra feature class and working from it instead of the event layer
    # decreased script tool runtime from ~8 min to ~2 min
    arcpy.SelectLayerByAttribute_management(PointFile, "clear_selection")

    pointOH = arcpy.FeatureClassToFeatureClass_conversion(
        PointFile, outputGDB, "GCAT_LUCWOO_lrs_points_" + TimeDateStr)
    # pointOH = arcpy.CopyFeatures_management(PointFile, "LRS_Events_copy")
    mi_points = milocationsxy(miTable, outputGDB)
    pointcopy = arcpy.Merge_management([pointOH, mi_points], 'miohpointsmerge')

    severity_queries = {
        'fatalities_count':
        "ODPS_TOTAL_FATALITIES_NBR<>0",
        'incapac_inj_count':
        "Incapac_injuries_NBR<>0 and ODPS_TOTAL_FATALITIES_NBR=0",
        'non_incapac_inj_count':
        "non_incapac_injuries_NBR<>0 and ODPS_TOTAL_FATALITIES_NBR=0 and incapac_injuries_nbr=0",
        'possible_inj_count':
        "possible_injuries_nbr<>0 and ODPS_TOTAL_FATALITIES_NBR=0 and non_incapac_injuries_nbr=0 and incapac_injuries_nbr=0"
    }
    fld_lst = [
        'SEVERITY_BY_TYPE_CD', 'fatalities_count', 'incapac_inj_count',
        'non_incapac_inj_count', 'possible_inj_count'
    ]

    # add fields for point layer

    for key in severity_queries:
        arcpy.AddField_management(pointcopy, key, "LONG")
        '''arcpy.SelectLayerByAttribute_management(PointFile, "NEW_SELECTION", severity_queries[key])
        arcpy.CalculateField_management(PointFile, key, 1)
        arcpy.SelectLayerByAttribute_management(PointFile, "Switch_selection")
        arcpy.CalculateField_management(PointFile, key, 0)'''
    # fillCountFields(pointcopy, fld_lst)
    with arcpy.da.UpdateCursor(pointcopy, fld_lst) as cursor:
        for row in cursor:
            if row[0] == 'Fatal Crashes':
                row[1] = 1
                row[2] = 0
                row[3] = 0
                row[4] = 0
            elif row[0] == 'Incapacitating Injury Crashes':
                row[1] = 0
                row[2] = 1
                row[3] = 0
                row[4] = 0
            elif row[0] == 'Non-Incapacitating Injury Crashes':
                row[1] = 0
                row[2] = 0
                row[3] = 1
                row[4] = 0
            elif row[0] == 'Possible Injury Crashes':
                row[1] = 0
                row[2] = 0
                row[3] = 0
                row[4] = 1
            else:
                row[1] = 0
                row[2] = 0
                row[3] = 0
                row[4] = 0
            cursor.updateRow(row)

    # Clear Selected Features
    arcpy.SelectLayerByAttribute_management(PointFile, "clear_selection")
    # PointFeatures2 = arcpy.CopyFeatures_management(PointFeatures,os.path.join(GDBspot, TimeDateStr + ".gdb\PointFeatures2"))
    PointFeatures = arcpy.FeatureClassToFeatureClass_conversion(
        pointcopy, outputGDB, "ohmi_points_copy" + TimeDateStr)
    ftype = {
        'Intersection': [IntersectionThreshold, IntersectionFeatures],
        'Segment': [SegmentThreshold, SegmentFeatures],
        'Subdivision': [psThreshold, psFeatures]
    }
    # field map and merge rules
    attchmnt = []
    writer = pandas.ExcelWriter(os.path.join(GDBspot, "Top_Locations.xlsx"),
                                engine='xlsxwriter')
    for f in ftype:

        # Create a new fieldmappings and add the two input feature classes.
        fieldmappings = arcpy.FieldMappings()
        fieldmappings.addTable(ftype[f][1])
        fieldmappings.addTable(PointFeatures)

        # Set merge rules so that, for each polygon, the joined crash count
        # fields are summed across all points that fall inside it (the field
        # maps are adjusted by the addSumFlds helper).
        addSumFlds(fieldmappings)

        # Run the Spatial Join tool, using the defaults for the join operation and join type
        loc = os.path.join(GDBspot, TimeDateStr + ".gdb\\" + f + "Join_LRS")
        Join = arcpy.SpatialJoin_analysis(ftype[f][1], PointFeatures, loc,
                                          "Join_one_to_one", "keep_all",
                                          fieldmappings)

        arcpy.AddField_management(Join, "PDO_", "LONG")
        arcpy.AddField_management(Join, "EPDO_Index", "DOUBLE")
        # CRLRS_EPDO_index(Join)
        CursorFlds = [
            'PDO_', 'EPDO_Index', 'Join_Count', 'sum_fatalities_count',
            'sum_incapac_inj_count', 'sum_non_incapac_inj_count',
            'sum_possible_inj_count'
        ]

        # determine PDO  and EPDO Index/Rate
        with arcpy.da.UpdateCursor(Join, CursorFlds) as cursor:
            for row in cursor:
                try:
                    row[0] = row[2] - int(row[3]) - int(row[4]) - int(
                        row[5]) - int(row[6])
                except:
                    row[0] = 0  # null or divide by zero are the major exceptions we are handling here
                try:
                    row[1] = (float(row[3]) * fatalwt + float(row[4]) *
                              seriouswt + float(row[5]) * nonseriouswt +
                              float(row[6]) * possiblewt +
                              float(row[0])) / float(row[2])
                except:
                    row[1] = 0  # null or divide by zero are the major exceptions we are handling here
                cursor.updateRow(row)

        # delete unnecessary fields
        keepFlds = [
            'OBJECTID', 'Shape', 'Shape_Area', 'Shape_Length', 'Name',
            'NAMELSAD', 'COUNTY', 'COUNTY_NME', 'Join_Count',
            'sum_fatalities_count', 'sum_incapac_inj_count',
            'sum_non_incapac_inj_count', 'sum_possible_inj_count', 'PDO_',
            'EPDO_Index', 'Fed_Aid_Buffer_Segments_2_Name', 'Length_ft',
            'County'
        ]
        # lstFlds = arcpy.ListFields(Join)

        dropFlds = [
            x.name for x in arcpy.ListFields(Join) if x.name not in keepFlds
        ]
        # delete fields
        arcpy.DeleteField_management(Join, dropFlds)

        # select high crash locations
        JoinLayer = arcpy.MakeFeatureLayer_management(
            Join,
            os.path.join(GDBspot, TimeDateStr + ".gdb\\" + f + "JoinLayer"))
        arcpy.AddMessage("{}".format(type(JoinLayer)))
        # arcpy.SelectLayerByAttribute_management(JoinLayer, "NEW_SELECTION", "Join_Count >=" + ftype[f][0])
        fld_nmes = [fld.name for fld in arcpy.ListFields(JoinLayer)]

        fld_nmes.remove(
            'Shape'
        )  # I think this field kept causing an exception: Data must be 1 dimensional
        arcpy.AddMessage("{}".format(fld_nmes))
        arcpy.AddMessage("{}".format(
            type(
                os.path.join(GDBspot,
                             TimeDateStr + ".gdb\\" + f + "JoinLayer"))))
        # political subdivision fields can be a list or tuple; a list works
        # once the 'Shape' field is removed
        n = arcpy.da.FeatureClassToNumPyArray(JoinLayer,
                                              fld_nmes,
                                              where_clause="Join_Count  >=" +
                                              ftype[f][0],
                                              skip_nulls=False,
                                              null_value=0)

        df = pandas.DataFrame(n)

        CRLRS_excel_export(df, f, writer)

    writer.save()
    return os.path.join(GDBspot, "Top_Locations.xlsx")
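A hedged usage sketch for the function above; the paths, weights, and thresholds are illustrative only (the thresholds stay strings because they are concatenated into a where clause):

if __name__ == '__main__':
    # Example EPDO weights: fatal=100, serious=10, non-serious=5, possible=1
    report = crashReportLRS(
        "Z:/fullerm/Safety Locations",                 # GDBspot: output folder
        "Z:/fullerm/Safety Locations/gcat_clean.csv",  # csv from schemaCleaner
        100, 10, 5, 1,
        "5", "5")
    print(report)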
Example No. 18
def mainFunction(propertyTitlesFeatureClass,memorialsTable,suburbsFeatureClass,mortgageFeatureClass,mortgageSuburbsFeatureClass): # Get parameters from ArcGIS Desktop tool by separating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
    try:          
        # --------------------------------------- Start of code --------------------------------------- #

        # Set the banks
        banks = ["Auckland Savings Bank","Australia and New Zealand Banking Group","Australian Mutual Provident Society","Bank of New Zealand","Heartland Bank","Kiwibank","New Zealand Home Loans","PGG Wrightson","Rabobank","Southland Building Society","Sovereign","Taranaki Savings Bank","The Co-Operative Bank","Wairarapa Building Society","Welcome Home Loan","Westpac","Other"]

        # Copy property titles and select out current mortgage data
        arcpy.AddMessage("Extracting mortgage data...")
        arcpy.TableSelect_analysis(memorialsTable, os.path.join(arcpy.env.scratchGDB, "Mortgage"), "instrument_type = 'Mortgage' AND current = 'T'")
        arcpy.Select_analysis(propertyTitlesFeatureClass, os.path.join(arcpy.env.scratchGDB, "PropertyTitles"), "")

        arcpy.AddMessage("Creating mortgage feature class...")
        # Join property titles and mortgage data
        arcpy.MakeQueryTable_management(os.path.join(arcpy.env.scratchGDB, "PropertyTitles") + ";" + os.path.join(arcpy.env.scratchGDB, "Mortgage"), "PropertyTitlesMortgageLayer", "USE_KEY_FIELDS", "", "", "PropertyTitles.title_no = Mortgage.title_no")
        arcpy.Select_analysis("PropertyTitlesMortgageLayer", mortgageFeatureClass, "")

        # Cleaning up fields
        arcpy.AddMessage("Cleaning up fields...")
        arcpy.AddField_management(mortgageFeatureClass, "mortgage_provider", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        # Calculating mortgage provider field from memorial text
        arcpy.CalculateField_management(mortgageFeatureClass, "mortgage_provider", "changeValue(!memorial_text!)", "PYTHON_9.3", "def changeValue(var):\\n  if \"ANZ\" in var:\\n    return \"Australia and New Zealand Banking Group\"\\n  if \"National Bank of New Zealand\" in var:\\n    return \"Australia and New Zealand Banking Group\"\\n  if \"Post Office Bank\" in var:\\n    return \"Australia and New Zealand Banking Group\"\\n  if \"Westpac\" in var:\\n    return \"Westpac\"\\n  if \"Home Mortgage Company\" in var:\\n    return \"Westpac\"\\n  if \"Trust Bank New Zealand\" in var:\\n    return \"Westpac\"\\n  if \"ASB\" in var:\\n    return \"Auckland Savings Bank\"\\n  if \"Bank of New Zealand\" in var:\\n    return \"Bank of New Zealand\"\\n  if \"Kiwibank\" in var:\\n    return \"Kiwibank\"\\n  if \"TSB\" in var:\\n    return \"Taranaki Savings Bank\"\\n  if \"Southland Building Society\" in var:\\n    return \"Southland Building Society\"\\n  if \"AMP\" in var:\\n    return \"Australian Mutual Provident Society\"\\n  if \"Rabobank\" in var:\\n    return \"Rabobank\"\\n  if \"Rabo Wrightson\" in var:\\n    return \"Rabobank\"\\n  if \"Countrywide\" in var:\\n    return \"Australia and New Zealand Banking Group\"\\n  if \"Mortgage Holding Trust\" in var:\\n    return \"Sovereign\"\\n  if \"Co-operative Bank\" in var:\\n    return \"The Co-Operative Bank\"\\n  if \"Co-Operative Bank\" in var:\\n    return \"The Co-Operative Bank\"\\n  if \"PSIS\" in var:\\n    return \"The Co-Operative Bank\"\\n  if \"New Zealand Home Lending\" in var:\\n    return \"New Zealand Home Loans\"\\n  if \"Wairarapa Building Society\" in var:\\n    return \"Wairarapa Building Society\"\\n  if \"PGG Wrightson\" in var:\\n    return \"PGG Wrightson\"\\n  if \"Heartland Bank\" in var:\\n    return \"Heartland Bank\"\\n  if \"Heartland Building Society\" in var:\\n    return \"Heartland Bank\"\\n  if \"Housing New Zealand\" in var:\\n    return \"Welcome Home Loan\"\\n  if \"Housing Corporation of New Zealand\" in var:\\n    return \"Welcome Home Loan\"\\n  else:\\n    return \"Other\"")
        arcpy.DeleteField_management(mortgageFeatureClass, "id;spatial_extents_shared;OBJECTID_1;id_1;title_no_1;land_district_1")
        arcpy.AddField_management(mortgageFeatureClass, "land_area", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(mortgageFeatureClass, "land_area", "!SHAPE_Area!", "PYTHON_9.3", "")        
      
        # Copy suburbs data
        arcpy.Select_analysis(suburbsFeatureClass, os.path.join(arcpy.env.scratchGDB, "Suburb"), "")
       
        # Spatially join suburb info
        arcpy.AddMessage("Analysing mortgages by suburb...")
        arcpy.SpatialJoin_analysis(mortgageFeatureClass, os.path.join(arcpy.env.scratchGDB, "Suburb"), os.path.join(arcpy.env.scratchGDB, "MortgageSuburbs"), "JOIN_ONE_TO_ONE", "KEEP_ALL", "", "INTERSECT", "", "")
        # Summary stats for suburbs
        arcpy.Statistics_analysis(os.path.join(arcpy.env.scratchGDB, "MortgageSuburbs"), os.path.join(arcpy.env.scratchGDB, "MortgageSuburbsStats"), "mortgage_provider COUNT", "SUBURB_4THORDER;mortgage_provider")

        # Add the banks count fields
        for bank in banks:
            arcpy.AddField_management(os.path.join(arcpy.env.scratchGDB, "MortgageSuburbsStats"), bank.replace(" ", "_").replace("-", "_"), "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
            arcpy.CalculateField_management(os.path.join(arcpy.env.scratchGDB, "MortgageSuburbsStats"), bank.replace(" ", "_").replace("-", "_"), "0", "PYTHON_9.3", "")

        # Get the banks fields
        for count,value in enumerate(banks):
            banks[count] = value.replace(" ", "_").replace("-", "_").replace("(", "_").replace(")", "_")
            
        fields = ["SUBURB_4THORDER","mortgage_provider","FREQUENCY"] + banks
        with arcpy.da.UpdateCursor(os.path.join(arcpy.env.scratchGDB, "MortgageSuburbsStats"), fields) as cursor:
            # For each row
            for row in cursor:
                suburb = row[0]
                mortgageProvider = row[1]
                mortgageProvider = mortgageProvider.replace(" ", "_").replace("-", "_").replace("(", "_").replace(")", "_")
                count = row[2]

                # Update the mortgage provider row with its count
                row[fields.index(mortgageProvider)] = count
                cursor.updateRow(row)

        # Dissolve the stats
        arcpy.Statistics_analysis(os.path.join(arcpy.env.scratchGDB, "MortgageSuburbsStats"), os.path.join(arcpy.env.scratchGDB, "MortgageSuburbsStatsDissolved"), "FREQUENCY SUM;Auckland_Savings_Bank SUM;Australia_and_New_Zealand_Banking_Group SUM;Australian_Mutual_Provident_Society SUM;Bank_of_New_Zealand SUM;Heartland_Bank SUM;Kiwibank SUM;New_Zealand_Home_Loans SUM;PGG_Wrightson SUM;Rabobank SUM;Southland_Building_Society SUM;Sovereign SUM;Taranaki_Savings_Bank SUM;The_Co_Operative_Bank SUM;Wairarapa_Building_Society SUM;Welcome_Home_Loan SUM;Westpac SUM;Other SUM;", "SUBURB_4THORDER")

        # Create mortgage suburbs feature class
        arcpy.AddMessage("Creating mortgage suburbs feature class...")
        arcpy.Select_analysis(suburbsFeatureClass, mortgageSuburbsFeatureClass, "")
        arcpy.AddField_management(mortgageSuburbsFeatureClass, "land_area", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(mortgageSuburbsFeatureClass, "land_area", "!SHAPE_Area!", "PYTHON_9.3", "")                
        # Join on mortgages suburb data
        arcpy.JoinField_management(mortgageSuburbsFeatureClass, "SUBURB_4THORDER", os.path.join(arcpy.env.scratchGDB, "MortgageSuburbsStatsDissolved"), "SUBURB_4THORDER", "")
        arcpy.DeleteField_management(mortgageSuburbsFeatureClass, "FREQUENCY;SUBURB_1STORDER;SUBURB_2NDORDER;SUBURB_3RDORDER")
        arcpy.AlterField_management(mortgageSuburbsFeatureClass, "SUBURB_4THORDER", "Suburb", "", "TEXT", "60", "NULLABLE", "false")
        arcpy.AlterField_management(mortgageSuburbsFeatureClass, "SUM_FREQUENCY", "SUM_ALL", "", "DOUBLE", "8", "NULLABLE", "false")
     
        # --------------------------------------- End of code --------------------------------------- #  
            
        # If called from gp tool return the arcpy parameter   
        if __name__ == '__main__':
            # Return the output if there is any
            if output:
                arcpy.SetParameterAsText(1, output)
        # Otherwise return the result          
        else:
            # Return the output if there is any
            if output:
                return output      
        # Logging
        if (enableLogging == "true"):
            # Log end of process
            logger.info("Process ended.")
            # Remove file handler and close log file            
            logging.FileHandler.close(logMessage)
            logger.removeHandler(logMessage)
    # If arcpy error
    except arcpy.ExecuteError:           
        # Build and show the error message
        errorMessage = arcpy.GetMessages(2)   
        arcpy.AddError(errorMessage)           
        # Logging
        if (enableLogging == "true"):
            # Log error          
            logger.error(errorMessage)
            # Log end of process
            logger.info("Process ended.")            
            # Remove file handler and close log file
            logging.FileHandler.close(logMessage)
            logger.removeHandler(logMessage)
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)
    # If python error
    except Exception as e:
        errorMessage = ""
        # Build and show the error message
        for i in range(len(e.args)):
            if (i == 0):
                # Python version check
                if sys.version_info[0] >= 3:
                    # Python 3.x
                    errorMessage = str(e.args[i]).encode('utf-8').decode('utf-8')                 
                else:
                    # Python 2.x
                    errorMessage = unicode(e.args[i]).encode('utf-8')
            else:
                # Python version check
                if sys.version_info[0] >= 3:
                    # Python 3.x
                    errorMessage = errorMessage + " " + str(e.args[i]).encode('utf-8').decode('utf-8')
                else:
                    # Python 2.x
                    errorMessage = errorMessage + " " + unicode(e.args[i]).encode('utf-8')               
        arcpy.AddError(errorMessage)              
        # Logging
        if (enableLogging == "true"):
            # Log error            
            logger.error(errorMessage)
            # Log end of process
            logger.info("Process ended.")            
            # Remove file handler and close log file
            logging.FileHandler.close(logMessage)
            logger.removeHandler(logMessage)
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)            
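The provider classification above is packed into a single very long CalculateField code block. A sketch of the same first-match substring logic as an ordinary function (pairs abridged; the full set is in the code block above), which could equally drive an arcpy.da.UpdateCursor:

BANK_PATTERNS = [
    ("ANZ", "Australia and New Zealand Banking Group"),
    ("National Bank of New Zealand", "Australia and New Zealand Banking Group"),
    ("Westpac", "Westpac"),
    ("Home Mortgage Company", "Westpac"),
    ("ASB", "Auckland Savings Bank"),
    ("Bank of New Zealand", "Bank of New Zealand"),
    ("Kiwibank", "Kiwibank"),
    ("TSB", "Taranaki Savings Bank"),
    # ... remaining pairs as in the code block above
]

def classify_provider(memorial_text):
    # First matching marker wins, mirroring the if-chain ordering above
    for marker, bank in BANK_PATTERNS:
        if marker in memorial_text:
            return bank
    return "Other"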
Example No. 19
# Process: Join Field (5)
arcpy.JoinField_management(pipeconn, "TREKK_ID", pipeconn_size_mismatch_check,
                           "TREKK_ID", "COUNT_pipe_size_")

# Process: Summary Statistics (2)
arcpy.Statistics_analysis(pipeconn, pipeconn_material_mismatch_check,
                          "pipe_material_ COUNT", "TREKK_ID;pipe_material_")

# Process: Join Field (4)
arcpy.JoinField_management(pipeconn, "TREKK_ID",
                           pipeconn_material_mismatch_check, "TREKK_ID",
                           "COUNT_pipe_material_")

# Process: Table Select (2)
arcpy.TableSelect_analysis(pipeconn__2_, pipeconn_could_not_draw,
                           "latitude_1 IS NULL OR latitude_12 IS NULL")

# Process: Table Select (3)
arcpy.TableSelect_analysis(
    pipeconn__2_, pipeconn_to_draw_out,
    "(direction_ = 'Out ' AND latitude_1 IS NOT NULL AND latitude_12 IS NOT NULL)"
)

# Process: XY To Line (2)
arcpy.XYToLine_management(
    pipeconn_to_draw_out, pipeconn_lines_out, "longitude_1", "latitude_1",
    "longitude_12", "latitude_12", "0", "fulcrum_id",
    "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]];-400 -400 1000000000;-100000 10000;-100000 10000;8.98315284119522E-09;0.001;0.001;IsHighPrecision"
)

# Process: Table Select
Example No. 20
tmpPoly = "in_memory/tmpPoly"
arcpy.RasterToPolygon_conversion(subPatchRaster,tmpPoly,"NO_SIMPLIFY")
dslvPoly =  "in_memory/dslvPoly"
arcpy.Dissolve_management(tmpPoly,dslvPoly,"grid_code")

# Convert the subpatch polygons to a coverage
msg("Converting subpatch zones to an Arc/Info coverage")
arcpy.FeatureclassToCoverage_conversion("%s POLYGON" %dslvPoly,subpatchCov)
subpatchCovPoly = arcpy.SelectData_management(subpatchCov,"polygon").getOutput(0)
subpatchCovArc = arcpy.SelectData_management(subpatchCov,"arc").getOutput(0)

# Select records into tmpTable
msg("Creating connectitivty table")
subPatchEdgeTbl = "in_memory/subPatchEdgeTbl"
selString = r'"$LEFTPOLYGON" > 1 AND "$RIGHTPOLYGON" > 1'
arcpy.TableSelect_analysis(subpatchCovArc,subPatchEdgeTbl,selString)

# Join ID codes to the subPatchedgeTable
msg("Joining IDs to connecitivty table")
arcpy.JoinField_management(subPatchEdgeTbl, "LEFTPOLYGON", subpatchCovPoly, "SUBPATCHPOLY#", "GRID_CODE")
arcpy.JoinField_management(subPatchEdgeTbl, "RIGHTPOLYGON", subpatchCovPoly, "SUBPATCHPOLY#", "GRID_CODE")

# Initialize output edge list
edgeList = open(edgeListFN, 'w')
edgeList.write("FromID, ToID\n")

# Write values to an edge list
msg("Writing edge list to %s" %edgeListFN)
rows = arcpy.SearchCursor(subPatchEdgeTbl,'"GRID_CODE" > 0 AND "GRID_CODE_1" > 0')
row = rows.next()
while row:
    # Loop body reconstructed from context (the snippet is truncated in the
    # source): write each left/right patch pair as an edge, then advance
    edgeList.write("%s, %s\n" % (row.GRID_CODE, row.GRID_CODE_1))
    row = rows.next()
edgeList.close()

Example No. 21

# ModelBuilder export residue: every assignment below aliases the same table
# through successive model steps
GeoTweet_dbf__6_ = GeoTweet_dbf__5_
GeoTweet_dbf__8_ = GeoTweet_dbf__6_
GeoTweet_dbf__9_ = GeoTweet_dbf__8_
GeoTweet_dbf__12_ = GeoTweet_dbf__9_
GeoTweet_dbf__7_ = GeoTweet_dbf__12_
GeoTweet_dbf__11_ = GeoTweet_dbf__7_
GeoTweet_dbf__14_ = GeoTweet_dbf__11_
GeoTweet_dbf__13_ = GeoTweet_dbf__14_
GeoTweet_dbf__10_ = GeoTweet_dbf__13_
GeoTweet_dbf__18_ = GeoTweet_dbf__10_
GeoTweet_dbf__16_ = GeoTweet_dbf__18_
GeoTweet_dbf__15_ = GeoTweet_dbf__16_
GeoTweet_Layer = GeoTweet_dbf__15_

# Process: Select Geo Tweet
arcpy.TableSelect_analysis(Tweet, GeoTweet_dbf,
                           "\"TWEETLAT\" <> '0.0 ' AND \"TWEETLAT\" <> '0.0'")

# Process: Add X Field
arcpy.AddField_management(GeoTweet_dbf, "X", "DOUBLE", "", "", "", "",
                          "NULLABLE", "NON_REQUIRED", "")

# Process: Add Y Field
arcpy.AddField_management(GeoTweet_dbf__2_, "Y", "DOUBLE", "", "", "", "",
                          "NULLABLE", "NON_REQUIRED", "")

# Process: Calculate X
arcpy.CalculateField_management(GeoTweet_dbf__3_, "X", "[TWEETLON]", "VB", "")

# Process: Calculate Y
arcpy.CalculateField_management(GeoTweet_dbf__4_, "Y", "[TWEETLAT]", "VB", "")
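The steps above (select non-zero coordinates, add X/Y, calculate X/Y) only prepare the table for mapping. A compact sketch of turning the prepared table into a point layer, reusing the numeric X/Y fields the export creates; the WGS84 spatial reference is an assumption, and all the GeoTweet_dbf__N_ names alias the same table:

arcpy.MakeXYEventLayer_management(GeoTweet_dbf, "X", "Y",
                                  "GeoTweet_Points",
                                  arcpy.SpatialReference(4326))
arcpy.CopyFeatures_management("GeoTweet_Points", "in_memory/GeoTweet_fc")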
Example No. 22
import arcpy

# Input excel file
excel = "T:/GIS_Tools/ITS/Quintile/Res_Quintile_05152019.xlsx/Sheet 1$"

# Input parcels from Enterprise
parcels = "C:/Users/laurens/AppData/Roaming/ESRI/Desktop10.6/ArcCatalog/GISENT.sde/GISENT.GIS.Parcels_All"

# Output quintiles from Enterprise
#quintiles = "C:/Users/laurens/AppData/Roaming/ESRI/Desktop10.6/ArcCatalog/GISENT.sde/GISENT.ASSESS.Quintiles"
quintiles = "T:/GIS_Tools/ITS/Quintile/ASSESS@GISENT_dc.sde/GISENT.ASSESS.Quintiles"

# Process to get Quintile Year feature
print "Creating Features..."
arcpy.TableSelect_analysis(excel, "in_memory/QuintileTable")
arcpy.MakeTableView_management("in_memory/QuintileTable", "Quintile_lyr")
arcpy.MakeFeatureLayer_management(parcels, "parcels_lyr")
arcpy.AddJoin_management("parcels_lyr", "PID_NO", "Quintile_lyr", "GIS_Number",
                         "KEEP_COMMON")
arcpy.Dissolve_management("parcels_lyr", "in_memory/QuintileDissolve",
                          "QuintileTable.Quintile_Year")
arcpy.AlterField_management("in_memory/QuintileDissolve",
                            "QuintileTable_Quintile_Year", "Quintile_Year",
                            "Quintile_Year")

# Store current year for updating attributes
import datetime
now = datetime.datetime.now()
current_year = now.year

# Calculate the next review year as a new field
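The snippet ends right before that calculation. A minimal sketch of what the step might look like, assuming a five-year review cycle and a numeric Quintile_Year field (field name, type, and cycle length are all assumptions):

arcpy.AddField_management("in_memory/QuintileDissolve", "Next_Review", "LONG")
with arcpy.da.UpdateCursor("in_memory/QuintileDissolve",
                           ["Quintile_Year", "Next_Review"]) as cursor:
    for row in cursor:
        # Next review falls five years after the quintile year (assumed)
        row[1] = int(row[0]) + 5
        cursor.updateRow(row)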
Example No. 23
def deduplicate_nhd(in_feature_class,
                    out_feature_class='',
                    unique_id='Permanent_Identifier'):
    """
    Returns a single feature class of all NHD features with no duplicated identifiers.
    :param in_feature_class: A feature class resulting from merging features from NHD datasets staged by subregion.
    :param out_feature_class: Optional. The feature class which will be created.
    :param unique_id: Optional. The identifier that needs to be unique in the output.
    :return:
    """
    # SETUP
    if out_feature_class:
        arcpy.AddMessage("Copying initial features to output...")
        arcpy.CopyFeatures_management(in_feature_class, out_feature_class)
    else:
        out_feature_class = in_feature_class

    # EXECUTE
    # Delete full identicals first--these come from overlaps in staged subregion data
    before_count = int(
        arcpy.GetCount_management(out_feature_class).getOutput(0))
    arcpy.AddMessage("Deleting full identicals...")
    # Check for full identicals on original *attribute fields*, excluding the one we specifically created to make them distinct
    # Also excluding object ID since that is obviously distinct
    excluded_fields = [
        'Shape', 'Shape_Length', 'Shape_Area', 'OBJECTID', 'nhd_merge_id'
    ]
    check_fields = [
        f.name for f in arcpy.ListFields(out_feature_class)
        if f.name not in excluded_fields
    ]
    arcpy.DeleteIdentical_management(out_feature_class, check_fields)
    after_full_count = int(
        arcpy.GetCount_management(out_feature_class).getOutput(0))
    arcpy.AddMessage(
        "{0} features were removed because they were full identicals to remaining features."
        .format(before_count - after_full_count))

    # Delete duplicated IDs by taking the most recent FDate--these come from NHD editing process somehow
    arcpy.AddMessage("Deleting older features with duplicated identifiers...")

    # Get a list of distinct IDs that have duplicates
    arcpy.Frequency_analysis(out_feature_class, "in_memory/freqtable",
                             unique_id)
    arcpy.TableSelect_analysis("in_memory/freqtable", "in_memory/dupeslist",
                               '''"FREQUENCY" > 1''')
    count_dupes = int(
        arcpy.GetCount_management("in_memory/dupeslist").getOutput(0))

    #If there are any duplicates, remove them by keeping the one with the latest FDate
    if count_dupes > 0:
        dupe_ids = [
            row[0] for row in arcpy.da.SearchCursor("in_memory/dupeslist", (
                unique_id))
        ]
        dupe_filter = ''' "{}" = '{{}}' '''.format(unique_id)
        for id in dupe_ids:
            dates = [
                row[0] for row in arcpy.da.SearchCursor(
                    out_feature_class, ["FDate"], dupe_filter.format(id))
            ]
            with arcpy.da.UpdateCursor(out_feature_class, [unique_id, "FDate"],
                                       dupe_filter.format(id)) as cursor:
                for row in cursor:
                    if row[1] == max(dates):
                        pass
                    else:
                        cursor.deleteRow()
        after_both_count = int(
            arcpy.GetCount_management(out_feature_class).getOutput(0))
        arcpy.AddMessage(
            "{0} features were removed because they were less recently edited than another feature with the same identifier."
            .format(after_full_count - after_both_count))
    arcpy.Delete_management("in_memory/freqtable")
    arcpy.Delete_management("in_memory/dupeslist")
Example No. 24
def mainFunction(
    geodatabase
):  # Get parameters from ArcGIS Desktop tool by separating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
    try:
        # Logging
        if (enableLogging == "true"):
            # Setup logging
            logger, logMessage = setLogging(logFile)
            # Log start of process
            logger.info("Process started.")

        # --------------------------------------- Start of code --------------------------------------- #

        # Get a list of the feature datasets in the database
        arcpy.env.workspace = geodatabase
        featureDatasetList = arcpy.ListDatasets("", "Feature")
        # Loop through the datasets
        datasetList = []
        for dataset in featureDatasetList:
            datasetList = datasetList + arcpy.ListFeatureClasses(
                "", "", dataset)

        # Get a list of the feature classes in the database
        featureClassList = arcpy.ListFeatureClasses()

        # Get a list of the tables in the database
        tableList = arcpy.ListTables()

        # Get the full list of datasets in the database
        fullDatasetList = featureClassList + tableList

        # Connect directly to SQL server
        sqlConnection = arcpy.ArcSDESQLExecute(geodatabase)

        # Compress the database
        arcpy.Compress_management(geodatabase)

        # Loop through the list of datasets
        for dataset in fullDatasetList:
            # If dataset is an orphaned dataset - "_H" suffix
            if (dataset[-2:].upper() == "_H"):
                # Logging
                if (enableLogging == "true"):
                    logger.info("Orphaned archive dataset found - " + dataset +
                                "...")
                arcpy.AddMessage("Orphaned archive dataset found - " +
                                 dataset + "...")

                baseDataset = dataset[:-2]

                # If the base dataset exists for the orphaned archive
                if arcpy.Exists(baseDataset):
                    # Describe the properties of the dataset
                    desc = arcpy.Describe(baseDataset)

                    # If a feature class
                    if (desc.dataType.lower() == "featureclass"):
                        # Delete all records in the base dataset
                        arcpy.DeleteFeatures_management(baseDataset)

                        # Add GUID to the datasets and load current records from archived dataset to base dataset
                        arcpy.AddField_management(baseDataset, "GUID", "TEXT",
                                                  "", "", "", "", "NULLABLE",
                                                  "NON_REQUIRED", "")
                        arcpy.AddField_management(baseDataset + "_H", "GUID",
                                                  "TEXT", "", "", "", "",
                                                  "NULLABLE", "NON_REQUIRED",
                                                  "")
                        arcpy.CalculateField_management(
                            baseDataset + "_H", "GUID", "CalcGUID()",
                            "PYTHON_9.3",
                            "def CalcGUID():\\n   import uuid\\n   return '{' + str(uuid.uuid4()).upper() + '}'"
                        )
                        arcpy.Select_analysis(
                            baseDataset + "_H", "in_memory\\currentRecords",
                            "GDB_TO_DATE >= '9999-12-31 00:00:00'")
                        arcpy.Append_management("in_memory\\currentRecords",
                                                baseDataset, "NO_TEST", "", "")

                        # Create a copy of the base dataset
                        arcpy.CopyFeatures_management(baseDataset,
                                                      baseDataset + "_Current")

                        # Delete all records in the base dataset
                        arcpy.DeleteFeatures_management(baseDataset)

                    # If a table
                    if (desc.dataType.lower() == "table"):
                        # Delete all records in the base dataset
                        arcpy.DeleteRows_management(baseDataset)

                        # Add GUID to the datasets and load current records from archived dataset to base dataset
                        arcpy.AddField_management(baseDataset, "GUID", "TEXT",
                                                  "", "", "", "", "NULLABLE",
                                                  "NON_REQUIRED", "")
                        arcpy.AddField_management(baseDataset + "_H", "GUID",
                                                  "TEXT", "", "", "", "",
                                                  "NULLABLE", "NON_REQUIRED",
                                                  "")
                        arcpy.CalculateField_management(
                            baseDataset + "_H", "GUID", "CalcGUID()",
                            "PYTHON_9.3",
                            "def CalcGUID():\\n   import uuid\\n   return '{' + str(uuid.uuid4()).upper() + '}'"
                        )
                        arcpy.TableSelect_analysis(
                            baseDataset + "_H", "in_memory\\currentRecords",
                            "GDB_TO_DATE >= '9999-12-31 00:00:00'")
                        arcpy.Append_management("in_memory\\currentRecords",
                                                baseDataset, "NO_TEST", "", "")

                        # Create a copy of the base dataset
                        arcpy.CopyRows_management(baseDataset,
                                                  baseDataset + "_Current", "")

                        # Delete all records in the base dataset
                        arcpy.DeleteRows_management(baseDataset)

                    # If archive rename already exists
                    if arcpy.Exists(baseDataset + "_Archive"):
                        # Delete the archive rename
                        arcpy.Delete_management(baseDataset + "_Archive")

                    # Rename the archive dataset
                    arcpy.Rename_management(dataset, baseDataset + "_Archive",
                                            "")

                    # Load all records from archive dataset into the base dataset
                    arcpy.Append_management(baseDataset + "_Archive",
                                            baseDataset, "NO_TEST", "", "")

                    # Check archiving is enabled
                    isArchived = desc.IsArchived

                    # Enable Archiving if it is not already enabled
                    if isArchived == False:
                        # Logging
                        if (enableLogging == "true"):
                            logger.info("Enabling archiving - " + baseDataset +
                                        "...")
                        arcpy.AddMessage("Enabling archiving - " +
                                         baseDataset + "...")

                        # Enable archiving
                        arcpy.EnableArchiving_management(baseDataset)

                        # If a feature class
                        if (desc.dataType.lower() == "featureclass"):
                            # Delete all records in the base dataset
                            arcpy.DeleteFeatures_management(baseDataset)

                        # If a table
                        if (desc.dataType.lower() == "table"):
                            # Delete all records in the base dataset
                            arcpy.DeleteRows_management(baseDataset)

                        # Logging
                        if (enableLogging == "true"):
                            logger.info(
                                "Loading in orphaned archive dataset records - "
                                + dataset + "...")
                        arcpy.AddMessage(
                            "Loading in orphaned archive dataset records - " +
                            dataset + "...")

                        # Update the dates in the new archive dataset from the old archive dataset based on GUID
                        sqlQuery = "UPDATE " + dataset + " SET " + dataset + ".GDB_FROM_DATE = " + baseDataset + "_Archive" + ".GDB_FROM_DATE, " + dataset + ".GDB_TO_DATE = " + baseDataset + "_Archive" + ".GDB_TO_DATE FROM " + dataset + " INNER JOIN " + baseDataset + "_Archive" + " ON " + dataset + ".GUID = " + baseDataset + "_Archive" + ".GUID"
                        sqlResult = sqlConnection.execute(sqlQuery)

                        # Delete the current records from the archive dataset (those with year of "9999")
                        sqlQuery = "DELETE FROM " + dataset + " WHERE GDB_TO_DATE >= convert(datetime, '9999-12-31 00:00:00',20)"
                        sqlResult = sqlConnection.execute(sqlQuery)

                        # Load all records from current base dataset into the base dataset
                        arcpy.Append_management(baseDataset + "_Current",
                                                baseDataset, "NO_TEST", "", "")

                        # Update the dates in the new archive dataset from the old archive dataset based on GUID
                        sqlQuery = "UPDATE " + dataset + " SET " + dataset + ".GDB_FROM_DATE = " + baseDataset + "_Archive" + ".GDB_FROM_DATE, " + dataset + ".GDB_TO_DATE = " + baseDataset + "_Archive" + ".GDB_TO_DATE FROM " + dataset + " INNER JOIN " + baseDataset + "_Archive" + " ON " + dataset + ".GUID = " + baseDataset + "_Archive" + ".GUID"
                        sqlResult = sqlConnection.execute(sqlQuery)

                        # Rename the current dates from 9999-12-31 23:59:59 (Oracle format) to 9999-12-31 00:00:00 (SQL server format) otherwise it won't finish a record when editing and will end up with a duplicate record
                        sqlQuery = "UPDATE " + dataset + " SET GDB_TO_DATE = convert(datetime, '9999-12-31 00:00:00',20) WHERE GDB_TO_DATE = convert(datetime, '9999-12-31 23:59:59',20)"
                        sqlResult = sqlConnection.execute(sqlQuery)

                        # Delete datasets not needed any longer
                        arcpy.Delete_management(baseDataset + "_Archive")
                        arcpy.Delete_management(baseDataset + "_Current")

                        # Delete GUID field that is not needed anymore
                        arcpy.DeleteField_management(baseDataset, "GUID")

                    else:
                        # Logging
                        if (enableLogging == "true"):
                            logger.warning("Archiving is already enabled - " +
                                           baseDataset + "...")
                        arcpy.AddWarning("Archiving is already enabled - " +
                                         baseDataset + "...")

                        # Delete/Rename datasets not needed any longer
                        arcpy.Rename_management(baseDataset + "_Archive",
                                                baseDataset + "_H",
                                                "Feature Class")
                        arcpy.Delete_management(baseDataset + "_Current")
        # --------------------------------------- End of code --------------------------------------- #
                else:
                    # Logging
                    if (enableLogging == "true"):
                        logger.warning("No base dataset found - " +
                                       baseDataset + "...")
                    arcpy.AddWarning("No base dataset found - " + baseDataset +
                                     "...")

        # If called from gp tool return the arcpy parameter
        if __name__ == '__main__':
            # Return the output if there is any
            if output:
                arcpy.SetParameterAsText(1, output)
        # Otherwise return the result
        else:
            # Return the output if there is any
            if output:
                return output
        # Logging
        if (enableLogging == "true"):
            # Log end of process
            logger.info("Process ended.")
            # Remove file handler and close log file
            logging.FileHandler.close(logMessage)
            logger.removeHandler(logMessage)
        pass
    # If arcpy error
    except arcpy.ExecuteError:
        # Build and show the error message
        errorMessage = arcpy.GetMessages(2)
        arcpy.AddError(errorMessage)
        # Logging
        if (enableLogging == "true"):
            # Log error
            logger.error(errorMessage)
            # Remove file handler and close log file
            logging.FileHandler.close(logMessage)
            logger.removeHandler(logMessage)
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)
    # If python error
    except Exception as e:
        errorMessage = ""
        # Build and show the error message
        for i in range(len(e.args)):
            if (i == 0):
                errorMessage = unicode(e.args[i]).encode('utf-8')
            else:
                errorMessage = errorMessage + " " + unicode(
                    e.args[i]).encode('utf-8')
        arcpy.AddError(errorMessage)
        # Logging
        if (enableLogging == "true"):
            # Log error
            logger.error(errorMessage)
            # Remove file handler and close log file
            logging.FileHandler.close(logMessage)
            logger.removeHandler(logMessage)
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)
Example No. 25

    S = (1000.0 / CN) - 10.0
    Ia = 0.2 * S
    runoffDepth = (prcpInches - Ia)**2 / (prcpInches - Ia + S)
    runoffVolume = (runoffDepth * 0.0254) * A
    runoffAcc = FlowAccumulation(fdrPu, runoffVolume, 'FLOAT')
    del CN, S, Ia, runoffDepth

    arcpy.BuildRasterAttributeTable_management(sinkLarge, True)

    ZonalStatisticsAsTable(sinkLarge, "VALUE", runoffAcc, runoffTable, "DATA",
                           "MAXIMUM")
    ZonalStatisticsAsTable(sinkLarge, "VALUE", storageVolume, storageTable,
                           "DATA", "SUM")

    arcpy.JoinField_management(runoffTable, 'VALUE', storageTable, 'VALUE')
    arcpy.TableSelect_analysis(runoffTable, trueSinkTable, '"SUM" > "MAX"')

    trueSinks = []
    rows = arcpy.SearchCursor(trueSinkTable, '', '', 'Value')
    for row in rows:
        trueSinks.append(row.Value)
    del row, rows

    seeds = arcpy.sa.ExtractByAttributes(sinkLarge,
                                         'VALUE IN ' + str(tuple(trueSinks)))
    nca = Watershed(fdrPu, seeds)
    env.extent = pus
    del seeds

    arcpy.RasterToPolygon_conversion(nca, ncaRaw, False, 'Value')
    # pull out LL features
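The runoff block above applies the SCS curve-number relations S = 1000/CN - 10 and Q = (P - Ia)^2 / (P - Ia + S) with Ia = 0.2 * S. A quick scalar check of the arithmetic (illustrative values, not from the source):

CN = 80.0
P = 3.0                            # storm depth in inches
S = (1000.0 / CN) - 10.0           # 2.5 in of potential retention
Ia = 0.2 * S                       # 0.5 in of initial abstraction
Q = (P - Ia) ** 2 / (P - Ia + S)   # 6.25 / 5.0 = 1.25 in of runoff
print(Q)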
Example No. 26
arcpy.GetRasterProperties_management("", Property_type, "")

# Process: Add Field
arcpy.AddField_management("", "viewable_pct", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")

# Process: Add Field (2)
arcpy.AddField_management(sites_morans_Ar_Merge__5_, "not_viewable_pct", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")

# Process: Iterate Rasters
arcpy.IterateRasters_mb(location_of_site_viewshed_files, "f*", "", "NOT_RECURSIVE")

# Process: Select Layer By Attribute
arcpy.SelectLayerByAttribute_management(sites_morans_Ar_Merge__2_, "NEW_SELECTION", "\"feature_no\" = '%Name%'")

# Process: Table Select
arcpy.TableSelect_analysis(cumcost4016, f4016_TableSelect, "")

# Process: Get Field Value
arcpy.GetFieldValue_mb(f4016_TableSelect, "visibility_pct", "String", "0")

# Process: Calculate Field (2)
arcpy.CalculateField_management(sites_morans_Ar_Merge__3_, "not_viewable_pct", Value, "PYTHON_9.3", "")

# Process: Calculate Field (3)
arcpy.CalculateField_management(sites_morans_Ar_Merge, "viewable_pct", "1- [not_viewable_pct]", "VB", "")

# Process: Divide
arcpy.gp.Divide_sa("", N_of_cells_in_raster, Divide_f40165)

Example No. 27

#step 11:
# Import arcpy module
    # BE CAREFUL: the value threshold is set to 250, but it may be less.
    #             The idea is to find the closest point of the basin boundary!
    outsetnull = SetNull(outextractmask, outextractmask, "VALUE < 250")
    StreamToFeature(outsetnull, outsetnull, tmpf + "tmp.shp", "SIMPLIFY")
    arcpy.FeatureVerticesToPoints_management(tmpf + "tmp.shp",
                                             tmpf + "tmp2.shp", "END")
    arcpy.PointDistance_analysis(tmpf+"tmp2.shp", path_river+"exu_R"+str(i+1)+".shp", \
                                  env.workspace+"RProfils/R"+str(i+1)+"table_acc_dist"+str(i+1))
    # Find FID of the MAX_DISTANCE,
    arcpy.Statistics_analysis(env.workspace+"/RProfils/R"+str(i+1)+"/table_acc_dist"+str(i+1)+".dbf", \
                              tmpf+"stats1", [["DISTANCE", "MAX"]])
    rows = arcpy.SearchCursor(tmpf + "stats1")
    row = rows.next()
    maxd = row.MAX_DISTANCE
    arcpy.TableSelect_analysis(tmpf + "stats1", tmpf + "select1",
                               "DISTANCE = " + str(maxd))
    rows = arcpy.SearchCursor(tmpf + "select1")
    row = rows.next()
    fidr = row.INPUT_FID
    # Extract point with the FID of the MAX_DISTANCE !
    arcpy.FeatureClassToFeatureClass_conversion(tmpf + "tmp2.shp", path_river,
                                                "src_R" + str(i + 1) + ".shp",
                                                "FID = " + str(fidr))

    print("         Calcul length of the basin " + str(i + 1))
    # Find max length of the basin (not the length of the stream): for each watershed,
    #                compute the distance between the outlet and each cell of the
    #                watershed limit. Take the longest
    arcpy.PolygonToLine_management(path_river_r+raster_fnme+"_wshed"+str(i+1)+".shp", \
                tmpf+raster_fnme+"+_wshed_line"+str(i+1)+".shp")
    arcpy.FeatureVerticesToPoints_management(tmpf+raster_fnme+"_wshed_line"+str(i+1)+".shp", \
Example No. 28
def mainFunction(
    featureClasses, tables, csvFiles, csvXYFieldNames, ftpSite, ftpFolder,
    ftpUsername, ftpPassword, gpService
):  # Get parameters from ArcGIS Desktop tool by separating by comma e.g. (var1 is 1st parameter,var2 is 2nd parameter,var3 is 3rd parameter)
    try:
        # --------------------------------------- Start of code --------------------------------------- #
        # Get the arcgis version
        arcgisVersion = arcpy.GetInstallInfo()['Version']
        # Setup scratch folder differently depending on ArcGIS version
        if (arcgisVersion == "10.0"):
            # Setup geodatabase to load data into in temporary workspace
            arcpy.env.scratchWorkspace = r"F:\Temp"
            tempFolder = arcpy.CreateFolder_management(
                arcpy.env.scratchWorkspace, "WebData-" + str(uuid.uuid1()))
        else:
            # Setup geodatabase to load data into in temporary workspace
            tempFolder = arcpy.CreateFolder_management(
                arcpy.env.scratchFolder, "WebData-" + str(uuid.uuid1()))
        arcpy.CreateFileGDB_management(tempFolder, "Data", "CURRENT")
        geodatabase = os.path.join(str(tempFolder), "Data.gdb")

        arcpy.AddMessage("Copying datasets...")
        # Load the feature classes and tables into a list if input values provided
        if (len(featureClasses) > 0):
            # Strip out apostrophes
            featureclassList = string.split(
                str(featureClasses).replace("'", ""), ";")
            # Loop through the feature classes
            for eachFeatureclass in featureclassList:
                # Create a Describe object from the dataset
                describeDataset = arcpy.Describe(eachFeatureclass)
                # Copy feature class into geodatabase using the same dataset name
                arcpy.CopyFeatures_management(
                    eachFeatureclass,
                    os.path.join(geodatabase, describeDataset.name), "", "0",
                    "0", "0")

        if (len(tables) > 0):
            tableList = string.split(str(tables).replace("'", ""), ";")
            # Loop through of the tables
            for eachTable in tableList:
                # Create a Describe object from the dataset
                describeDataset = arcpy.Describe(eachTable)
                # Copy table into geodatabase using the same dataset name
                arcpy.TableSelect_analysis(
                    eachTable, os.path.join(geodatabase, describeDataset.name),
                    "")

        # If CSV files provided
        if (len(csvFiles) > 0):
            csvList = string.split(str(csvFiles).replace("'", ""), ";")
            # Loop through of the CSVs
            for eachCSV in csvList:
                # Create a Describe object from the dataset
                describeDataset = arcpy.Describe(eachCSV)
                datasetName = string.split(describeDataset.name, ".")
                # Change CSV name if starts with a digit
                if datasetName[0].isdigit():
                    datasetName[0] = "Layer" + datasetName[0]
                # Create feature layer and convert to feature class
                csvFields = string.split(
                    str(csvXYFieldNames).replace("'", ""), ",")
                # Copy feature class into geodatabase using the same dataset name
                arcpy.MakeXYEventLayer_management(
                    eachCSV, csvFields[0], csvFields[1], "Layer",
                    "PROJCS['NZGD_2000_New_Zealand_Transverse_Mercator',GEOGCS['GCS_NZGD_2000',DATUM['D_NZGD_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',1600000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',173.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]];-4020900 1900 10000;-100000 10000;-100000 10000;0.001;0.001;0.001;IsHighPrecision",
                    "")
                arcpy.CopyFeatures_management(
                    "Layer", os.path.join(geodatabase, datasetName[0]), "",
                    "0", "0", "0")

        # Check input datasets are provided before zipping up
        if ((len(featureClasses) > 0) or (len(tables) > 0)
                or (len(csvFiles) > 0)):
            arcpy.AddMessage("Zipping data...")
            # Setup the zip file
            if (arcgisVersion == "10.0"):
                zipFile = os.path.join(arcpy.env.scratchWorkspace,
                                       "WebData-" + str(uuid.uuid1()) + ".zip")
            else:
                zipFile = os.path.join(arcpy.env.scratchFolder,
                                       "WebData-" + str(uuid.uuid1()) + ".zip")
            zippedFolder = zipfile.ZipFile(zipFile, "w", allowZip64=True)

            # Zip up the geodatabase
            root_len = len(os.path.abspath(str(tempFolder)))
            # For each of the directories in the folder
            for root, dirs, files in os.walk(str(tempFolder)):
                archive_root = os.path.abspath(root)[root_len:]
                # For each file
                for f in files:
                    fullpath = os.path.join(root, f)
                    archive_name = os.path.join(archive_root, f)
                    zippedFolder.write(fullpath, archive_name)
            # Close zip file
            zippedFolder.close()

            # EXTERNAL FUNCTION - Send data to server
            FTPUpload.mainFunction(zipFile, ftpSite, ftpFolder, ftpUsername,
                                   ftpPassword)
        else:
            #--------------------------------------------Logging--------------------------------------------#
            arcpy.AddError("Process stopped: No datasets provided")
            # Log error
            if (enableLogging == "true"):
                logger.error("Process stopped: No datasets provided")
            #-----------------------------------------------------------------------------------------------#

        # Call geoprocessing service to update data on server
        arcpy.AddMessage("Updating data on server...")
        arcpy.ImportToolbox(gpService, "toolbox")
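        # Tools from the imported service are exposed as arcpy functions
        # suffixed with the toolbox alias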
        arcpy.DataUpdateFromZip_toolbox("Existing")

        # --------------------------------------- End of code --------------------------------------- #

        # If called from gp tool return the arcpy parameter
        if __name__ == '__main__':
            # Return the output if there is any
            if output:
                arcpy.SetParameterAsText(1, output)
        # Otherwise return the result
        else:
            # Return the output if there is any
            if output:
                return output
        # Logging
        if (enableLogging == "true"):
            # Log end of process
            logger.info("Process ended.")
            # Remove file handler and close log file
            logging.FileHandler.close(logMessage)
            logger.removeHandler(logMessage)
    # If arcpy error
    except arcpy.ExecuteError:
        # Build and show the error message
        errorMessage = arcpy.GetMessages(2)
        arcpy.AddError(errorMessage)
        # Logging
        if (enableLogging == "true"):
            # Log error
            logger.error(errorMessage)
            # Log end of process
            logger.info("Process ended.")
            # Remove file handler and close log file
            logging.FileHandler.close(logMessage)
            logger.removeHandler(logMessage)
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)
    # If python error
    except Exception as e:
        errorMessage = ""
        # Build and show the error message
        for i in range(len(e.args)):
            if (i == 0):
                errorMessage = unicode(e.args[i]).encode('utf-8')
            else:
                errorMessage = errorMessage + " " + unicode(
                    e.args[i]).encode('utf-8')
        arcpy.AddError(errorMessage)
        # Logging
        if (enableLogging == "true"):
            # Log error
            logger.error(errorMessage)
            # Log end of process
            logger.info("Process ended.")
            # Remove file handler and close log file
            logging.FileHandler.close(logMessage)
            logger.removeHandler(logMessage)
        if (sendErrorEmail == "true"):
            # Send email
            sendEmail(errorMessage)
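
# Note: the zip step above walks the scratch folder with os.walk and stores
# each file under its path relative to the folder root. Pulled out on its
# own, the same pattern looks like this (the helper name is illustrative,
# not part of the original tool):
import os
import zipfile

def zip_folder(folder, zip_path):
    # Zip every file under folder, keeping paths relative to the folder root
    root_len = len(os.path.abspath(folder))
    zipped = zipfile.ZipFile(zip_path, "w", allowZip64=True)
    for root, dirs, files in os.walk(folder):
        archive_root = os.path.abspath(root)[root_len:]
        for f in files:
            fullpath = os.path.join(root, f)
            zipped.write(fullpath, os.path.join(archive_root, f))
    zipped.close()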

Exemplo n.º 29
0
    outPutGDBPath = ADSFunctions.makeNewGDBIfDoesntExist(
        workingFolder, outPutGDB)

    individualDCATables = [
        os.path.join(adsGDB, table) for table in arcpy.ListTables()
    ]
    for individualTable in individualDCATables:
        print('Working on', individualTable)
        table = '{}_View'.format(os.path.basename(individualTable))
        arcpy.MakeTableView_management(individualTable, table)
        whereStatement = ADSFunctions.makeDamageCodeWhereStatement(
            damgeCodesList)
        if ADSFunctions.checkForDamageCodes(table, whereStatement):
            selectTableName = '{}_Selected'.format(os.path.basename(table))
            selectTablePath = os.path.join(scratchGDB, selectTableName)
            arcpy.TableSelect_analysis(table, selectTablePath, whereStatement)
            duplicateIDS = ADSFunctions.returnAllValuesFromField(
                selectTableName, 'ORIGINAL_ID')
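            # 'None' is evaluated as a Python expression by PYTHON_9.3,
            # so this initialises the DUPLICATE field to NULL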
            arcpy.CalculateField_management(selectTableName, 'DUPLICATE',
                                            'None', 'PYTHON_9.3')

            duplicateIDS = ADSFunctions.returnDuplicates(duplicateIDS)
            mergedTableName = '{}_{}'.format(
                region,
                table.replace('Expanded', 'Merged').replace('_View', ''))
            featureClassName = '{}_{}_Merged'.format(
                region,
                table.replace('_Expanded', '').replace('_View', ''))
            if duplicateIDS:
                # The original example is truncated here; a plausible
                # completion (an assumption, not the source) flags rows whose
                # ORIGINAL_ID occurs more than once
                arcpy.CalculateField_management(
                    selectTableName, 'DUPLICATE',
                    '"Yes" if !ORIGINAL_ID! in {} else None'.format(
                        list(duplicateIDS)), 'PYTHON_9.3')
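
# ADSFunctions.returnDuplicates is an external helper; a plausible minimal
# version (an assumption, not the original code) keeps the IDs that occur
# more than once:
from collections import Counter

def returnDuplicates(values):
    # Return each value that appears more than once in the input
    counts = Counter(values)
    return [value for value, count in counts.items() if count > 1]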
Exemplo n.º 30
0
arcpy.MakeXYEventLayer_management(
    Estacao_Vazia, "MdaLongitude", "MdaLatitude", EstAproces,
    "GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]];-400 -400 1000000000;-100000 10000;-100000 10000;8,98315284119521E-09;0,001;0,001;IsHighPrecision",
    "")

# Process: Add Join
arcpy.AddJoin_management(Est_BI, "IdeRegistroPeriodo", EstAproces,
                         "IdeRegistroPeriodo", "KEEP_ALL")

# Print the field names of the joined table
field_names = [f.name for f in arcpy.ListFields(Est_Join)]
print(field_names)
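
# After Add Join, fields are qualified with their source table names, which
# is why the selections below spell out the full joined field names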

# Process: Table Select
arcpy.TableSelect_analysis(
    Est_Join, Per_a_proc,
    'Estacao_Vazia_Features.DthInicioPeriodo IS NULL AND StageDW.SFG.StgEstacaoCurvaPermanencia_Features.MdaLatitude IS NOT NULL'
)
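# Rows with no period start yet but valid coordinates are the stations
# that still need to be processed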

# Make an XY event layer for the period to be processed

arcpy.MakeXYEventLayer_management(
    Per_a_proc, "StageDW_SFG_StgEstacaoCurvaPermanencia_Features_MdaLongitude",
    "StageDW_SFG_StgEstacaoCurvaPermanencia_Features_MdaLatitude", Per_fc,
    "GEOGCS['GCS_SIRGAS_2000',DATUM['D_SIRGAS_2000',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]];-400 -400 1000000000;-100000 10000;-100000 10000;8,98315284119521E-09;0,001;0,001;IsHighPrecision",
    "")

# Split by attributes, one output per unique period (start/end date pair)

arcpy.SplitByAttributes_analysis(
    Per_fc, WsPeriodo,
    "StageDW_SFG_StgEstacaoCurvaPermanencia_Features_DthInicioPeriodo;StageDW_SFG_StgEstacaoCurvaPermanencia_Features_DthFimPeriodo"