Beispiel #1
0
def dmbiologist(pivotTable):
    """Create one FIND status-report spreadsheet per biologist.

    Copies *pivotTable* into the workspace, removes fully-processed rows,
    reassigns generic creator accounts (arcgis/tjadmin/administrator/
    bgeorgic) to the responsible biologist via a refcode abbreviation,
    and exports one .xls per biologist to the FIND reports folder.

    :param pivotTable: table with dmproc, total_records,
        survey_site_dmstat, refcode and created_by fields
    """
    pivotTableCOPY = arcpy.TableToTable_conversion(pivotTable, env.workspace,
                                                   "pivotTableCOPY")
    # Drop rows whose records are all processed and whose survey site is
    # processed or has no status.
    with arcpy.da.UpdateCursor(
            pivotTableCOPY,
        ["dmproc", "total_records", "survey_site_dmstat"]) as cursor:
        for row in cursor:
            if row[0] == row[1] and (row[2] == "dmproc" or row[2] is None):
                cursor.deleteRow()

    # refcode abbreviation -> biologist account (parallel lists).
    refname = [
        "hna", "geo", "lep", "eic", "tra", "dwa", "yea", "zim", "eaz", "alb",
        "kun", "mcp", "mil", "wis", "gip", "fur", "wal", "wat", "woo", "gle",
        "gru", "sch", "shc", "dav"
    ]
    createnames = [
        "ahnatkovich", "bgeorgic", "bleppo", "ceichelberger", "ctracey",
        "dwatts", "dyeany", "ezimmerman", "ezimmerman", "jalbert", "jkunsman",
        "jmcpherson", "rmiller", "jwisgo", "kgipe", "mfuredi", "mwalsh",
        "dwatts", "pwoods", "rgleason", "sgrund", "sschuette", "sschuette",
        "ezimmerman"
    ]
    generic = ("arcgis", "tjadmin", "administrator", "bgeorgic")
    # One cursor pass instead of the original's 24 (one per ref/name pair).
    # Re-checking the generic test inside the inner loop reproduces the
    # original multi-pass semantics exactly, including the case where an
    # intermediate assignment to "bgeorgic" can be overwritten by a later
    # matching abbreviation.
    with arcpy.da.UpdateCursor(pivotTableCOPY,
                               ["refcode", "created_by"]) as cursor:
        for row in cursor:
            if row[0] is None or row[1] is None:
                continue
            for ref, name in zip(refname, createnames):
                if row[1].lower() in generic and ref in row[0].lower():
                    row[1] = name
                    cursor.updateRow(row)

    outPath = "P:\\Conservation Programs\\Natural Heritage Program\\" \
    "Data Management\\Instructions, procedures and documentation\\FIND\\" \
    "FIND_2016\\Reports\\Biologist Status Reports"

    # Unique creator names; None sorts/skips below.
    with arcpy.da.SearchCursor(pivotTableCOPY, "created_by") as cursor:
        biologists = sorted({row[0] for row in cursor})

    for biologist in biologists:
        if biologist is None:
            continue
        expression = "created_by = '{}'".format(biologist)
        tableTEMP = arcpy.TableToTable_conversion(pivotTableCOPY,
                                                  "in_memory", "tableTEMP",
                                                  expression)
        filename = (biologist + " - " + "FIND Status Report " +
                    time.strftime("%d%b%Y") + ".xls")
        outTable = os.path.join(outPath, filename)
        arcpy.TableToExcel_conversion(tableTEMP, outTable)
    # Parenthesized print works in both Python 2 and 3.
    print("DM Biologist Report Created!")
Beispiel #2
0
def snap_transfers_to_network(transfer_shp_f, node_shp):
    """Snap transfer points to their nearest network node coordinates.

    Joins each transfer's nearNID to the node table's POINT_X/POINT_Y and
    rebuilds the transfer shapefile at the node locations.

    BUGFIX: the original body referenced an undefined name
    ``transfer_shp`` instead of the ``transfer_shp_f`` parameter, which
    raised NameError (or silently used an unrelated global).
    """
    arcpy.AddXY_management(node_shp)
    arcpy.TableToTable_conversion(node_shp, "C:/GIS/", "node.dbf", "", "", "")
    arcpy.TableToTable_conversion(transfer_shp_f, "C:/GIS/", "transfer.dbf",
                                  "", "", "")
    # Pull node coordinates onto each transfer row via the nearNID key.
    arcpy.JoinField_management("C:/GIS/transfer.dbf", "nearNID",
                               "C:/GIS/node.dbf", "ID", ["POINT_X", "POINT_Y"])
    # arcpy.Delete_management(transfer_shp_snapped)
    arcpy.MakeXYEventLayer_management("C:/GIS/transfer.dbf", "POINT_X",
                                      "POINT_Y", "new_transfer")
    # Overwrite the input shapefile with the snapped geometry.
    arcpy.CopyFeatures_management("new_transfer", transfer_shp_f)
def dbf_2csv(GPS_date):
    """Convert the six hourly intersect-analysis DBFs for both detectors
    (bh_141 and bh_142) on day 201612<GPS_date> into CSV tables.

    :param GPS_date: day-of-month number used in the 201612%d folder names

    The original duplicated the 141 and 142 code verbatim and recomputed
    the (loop-invariant) output folder on every iteration.
    """
    for detector in ('141', '142'):
        # Output folder is the same for all six files of one detector.
        out_dir = 'E:/MapMatch/dbf_2csv/bh_%s/201612%d' % (detector, GPS_date)
        for i in range(6):
            in_dbf = ('E:/MapMatch/Intersect_analysis/bh_%s/201612%d/'
                      'bh%s_201612%d_%02d.dbf'
                      % (detector, GPS_date, detector, GPS_date, i))
            arcpy.TableToTable_conversion(
                in_dbf, out_dir,
                'bh%s_201612%d_%02d.csv' % (detector, GPS_date, i))
Beispiel #4
0
def Iniciar_valor():
    """Initialize the temp workspace tables and compute decimal-degree
    coordinate fields.

    Clears stale DBFs from <script dir>/temp, re-creates the conversion
    and salida tables from table.dbf, then adds LATITUD_GD/LONGITUD_GD
    and fills them by parsing the DMS strings in Latitud/Longitud.
    """
    path = os.path.dirname(os.path.abspath(__file__))
    path_temp = path + "\\temp"
    arcpy.env.overwriteOutput = True
    arcpy.env.workspace = path + "/temp"

    # BUGFIX: the original tested ...\temp\... but deleted ...\tempo\...,
    # so the stale tables were never actually removed.
    for dbf in ("conversion.dbf", "conversion_1.dbf", "conversion_12.dbf",
                "salida.dbf", "salida_1.dbf"):
        target = path + "\\temp\\" + dbf
        if arcpy.Exists(target):
            arcpy.Delete_management(target)

    # First pair: copies from table.dbf resolved in the current workspace.
    arcpy.TableToTable_conversion("table.dbf", path + "/temp", "conversion_1")
    arcpy.TableToTable_conversion("table.dbf", path + "/temp", "salida_1")
    arcpy.env.workspace = path + "/temp"
    # Second pair: copies from the explicit temp-folder table.dbf.
    arcpy.TableToTable_conversion(path_temp + "\\table.dbf", path + "/temp",
                                  "conversion")
    arcpy.TableToTable_conversion(path_temp + "\\table.dbf", path + "/temp",
                                  "salida")
    for fld in ("LATITUD_GD", "LONGITUD_GD"):
        arcpy.AddField_management("salida.dbf", fld, "FLOAT", "", "", "", "",
                                  "NULLABLE", "NON_REQUIRED", "")

    # DMS -> decimal degrees; W/S hemispheres become negative.
    expression = "parse_dms(!Latitud!)"
    expression1 = "parse_dms(!Longitud!)"
    codeblock = """def parse_dms(dms):
                import re
                try:
                    parts=re.split('[^\d\w\.]+',dms.encode('utf8'))
                    dd=float(parts[0])+float(parts[1])/60.0+float(parts[2])/(3600.0);
                    #print(parts[0]+","+","+parts[1]+","+parts[2]+","+parts[3])
                    if parts[3]=='W' or parts[3]=='S':
                        dd*=-1
                    return dd
                except Exception as e:
                    return 0"""

    arcpy.CalculateField_management("salida.dbf", "LATITUD_GD", expression,
                                    "PYTHON_9.3", codeblock)
    arcpy.CalculateField_management("salida.dbf", "LONGITUD_GD", expression1,
                                    "PYTHON_9.3", codeblock)
def zipCalc(inputDF, outLoc, commField, zipField):
    """Assign zip codes to rows of *inputDF* based on community name.

    Exports the table to CSV to enumerate unique community values, looks
    each one up in <outLoc>/zipExp.csv, prompts the operator when the
    lookup is ambiguous or missing, and writes the result into the "Zip"
    field. Rows left at the 99999 placeholder are removed at the end
    (via the module-level ``removeRow`` helper).

    :param inputDF: feature class / table name (a matching .dbf is read)
    :param outLoc: folder holding zipExp.csv and receiving the temp CSV
    :param commField: field containing the community name
    :param zipField: unused here; kept for interface compatibility
    """
    # make table into a csv to count unique town names
    outTable = "toCountTEMP.csv"
    arcpy.TableToTable_conversion(str(inputDF) + ".dbf", outLoc, outTable)
    readTable = str(outLoc) + "/" + str(outTable)
    tempDF = pd.read_csv(readTable)
    uniqueZips = tempDF[commField].unique()
    zipRef = pd.read_csv(outLoc + '/zipExp.csv')
    # list of values that can be ignored and not assigned a zip
    ignoreNames = ["SECTOR", "CELLULAR", "NOT AVAILABLE", "VOIP", "UNKNOWN"]

    # The table view is loop-invariant; the original rebuilt it on every
    # iteration.
    arcpy.MakeTableView_management(inputDF, "tempView")
    var = str(commField)
    for i in uniqueZips:
        expression = "\"" + var + "\" = " + "'" + str(i) + "'"  # "var" = 'zip'
        arcpy.SelectLayerByAttribute_management("tempView", "NEW_SELECTION",
                                                expression)
        # check if there's a matching town name in the zip code file
        try:
            temp = zipRef.loc[zipRef.NAME == str(i).title(), "POSTAL"]
            # checks if the community can have more than one zip code
            if temp.size > 1:
                print(str(i) + " can have multiple zip codes")
                zipCode = raw_input("Please enter the zip code: ")
            else:
                # Reuse the lookup above instead of repeating it; keep the
                # first five characters (ZIP without +4 suffix).
                zipCode = str(temp.values[0])[0:5]
        except Exception:  # narrowed from a bare except (was hiding bugs)
            if str(i).upper() in ignoreNames:
                continue
            else:
                zipCode = raw_input("Please enter the zip code for " +
                                    str(i) + ": ")
        arcpy.CalculateField_management("tempView", "Zip",
                                        "'" + zipCode + "'", "PYTHON_9.3")
        print("Zip code for " + i + " assigned")
    removeRow(inputDF, "Zip", "99999")
Beispiel #6
0
def mergeFeatureClasses(
):  #arcpy.management.Merge(mfileList, r"D:\data\covid\MyProject.gdb\capstoneStates", '', "NO_SOURCE_INFO")
    """Merge per-state EHSA case feature classes into one dataset and
    load the matching pattern-count CSV into the output GDB.

    Relies on module-level globals: ``capstoneStates`` (iterable),
    ``mfileList`` (accumulator list of input paths), ``tdate`` (date
    string embedded in path names) and ``getTraceback`` (error helper)
    -- TODO confirm their definitions elsewhere in the file.
    """

    #r"D:\data\covid\CO_20201103\\"+"Ugn"+tdate+".gdb\\"+"Ugn"+tdate+"EHSA
    try:
        # NOTE(review): the loop appends the SAME path once per entry in
        # capstoneStates -- `x` is never used, so mfileList receives
        # duplicates. Confirm whether the path should depend on x.
        for x in capstoneStates:
            mfileList.append(r"D:\data\covid\\" + "Ugn" + tdate + "\\" +
                             "Ugn" + tdate + ".gdb\\" + "cases" +
                             str(int(tdate) - 1) + "EHSA")
        for file in mfileList:
            print(file)
        # NOTE(review): Exists() checks ...capstoneStates but Delete
        # targets ...capStates -- likely a typo, so the existing dataset
        # may never actually be removed. (Merge below also appends
        # "Ugn"+tdate to the name, a third variant.)
        if arcpy.Exists(r"D:\data\covid\capstoneStates.gdb\capstoneStates"):
            arcpy.Delete_management(
                r"D:\data\covid\capstoneStates.gdb\capStates")
            print(
                "Exists D:\data\covid\capstoneStates.gdb\capstoneStates",
                arcpy.Exists(
                    r"D:\data\covid\capstoneStates.gdb\capstoneStates"))
        # Merge all collected inputs into one dated feature class.
        arcpy.management.Merge(
            mfileList,
            r"D:\data\covid\capstoneStates.gdb\capstoneStates" + "Ugn" + tdate,
            '', "NO_SOURCE_INFO")
        # Index GEONUM for the joins performed downstream.
        arcpy.management.AddIndex(
            r"D:\data\covid\capstoneStates.gdb\capstoneStates" + "Ugn" + tdate,
            "GEONUM", "GEONUM", "NON_UNIQUE", "NON_ASCENDING")
        patternCount = os.path.join(r"D:\data\covid",
                                    "patternCount" + tdate + ".csv")
        arcpy.TableToTable_conversion(
            patternCount, r"D:\data\covid\capstoneStates.gdb",
            "patternCount" + "Ugn" + tdate)  #Table To Table conversion

    except:
        # NOTE(review): bare except -- swallows everything including
        # KeyboardInterrupt; reports only via getTraceback().
        getTraceback()
Beispiel #7
0
def main():
    """Join Emme link volumes (volumes.csv) onto the emme_links shapefile
    and export the joined result as a new shapefile."""
    startTime = dt.datetime.now()
    print("Script run at {0}.".format(startTime))

    script_dir = str(Path(__file__).parents[0])

    # `Scenario_1024` should be changed to the appropriate scenario number
    # output by Emme.
    links_shapefile = os.path.join(script_dir, 'New_Project', 'Media',
                                   'Scenario_1024', 'emme_links.shp')
    in_field = 'ID'
    join_table = os.path.join(script_dir, 'volumes.csv')
    join_field = 'UNIQUEID'

    # Convert the CSV to a DBF so JoinField can consume it.
    arcpy.TableToTable_conversion(join_table, script_dir,
                                  'volumes_converted.dbf')
    converted_table = os.path.join(script_dir, 'volumes_converted.dbf')

    # Attach the volume columns to the links by ID = UNIQUEID.
    joined_file = arcpy.JoinField_management(links_shapefile, in_field,
                                             converted_table, join_field)

    arcpy.FeatureClassToShapefile_conversion(joined_file,
                                             os.path.join(script_dir))

    endTime = dt.datetime.now()
    print("Script finished in {0}.".format(endTime - startTime))
Beispiel #8
0
    def responseToTable(self, table):
        '''
        Materialize the JSON query response as a scratch feature class or
        table.

        :param table: name of the feature class/table being evaluated;
            the prefix selects the path ("GPT_" = point feature class,
            "TB_" = plain table)
        :return: creates a temporary dataset in the SCRATCH gdb and
            stores it on ``self.copy``
        '''
        # Attribute dicts of every returned feature.
        array = [
            x.get('attributes') for x in self.jsonresponse.get('features')
        ]
        # (globalid, objectid) pairs, ordered by objectid so they line up
        # with the cursor iteration below.
        self.identifiers = [[x.get('globalid'),
                             x.get('objectid')] for x in array]
        self.identifiers.sort(key=lambda x: x[1])

        pth = os.path.join(self.scratch, 'temp')
        # AsShape(..., True) treats the JSON as an Esri JSON FeatureSet.
        feature = arcpy.AsShape(self.jsonresponse, True)
        if table.split("_")[0] == "GPT":
            self.copy = arcpy.CopyFeatures_management(feature, pth)
            arcpy.management.AddField(self.copy, "EVENTID", "GUID")
            n = 0
            # Write each row's globalid (uppercased, brace-wrapped GUID
            # format) into EVENTID, relying on the sorted order above.
            with arcpy.da.UpdateCursor(self.copy, ["EVENTID"]) as cursorU:
                for row in cursorU:
                    mmm = self.identifiers[n][0].upper()
                    row[0] = u'{}'.format('{' + mmm + '}')
                    cursorU.updateRow(row)
                    n = n + 1
            del cursorU
            self.listaCDMTRA = [[x[0], x[1]] for x in arcpy.da.SearchCursor(
                self.copy, ["EVENTID", self.CDMTRA])]
            arcpy.DeleteField_management(self.copy, "globalid")

        if table.split("_")[0] == "TB":
            # Tables get a uniquified name: "<table>a" + 5 chars of a UUID.
            self.copy = arcpy.TableToTable_conversion(
                feature, self.scratch,
                '{}a{:.5}'.format(table, str(uuid.uuid4())))
Beispiel #9
0
def CreateXwalkTable(UPConfig, ZonalDataset, ZoneField, MaxDist):
    """Build the upa_ZoneXWalk crosswalk between zone IDs and base
    geometry IDs.

    Spatially joins base-geometry centroids to the closest zone (within
    MaxDist), persists the result as upa_ZoneXWalk in the UPlan GDB, and
    strips every field except the base-geometry ID and the zone field.
    """
    db_path = UPConfig['paths']['dbpath']
    db_name = UPConfig['paths']['dbname']
    UPGDB = os.path.join(db_path, db_name)
    env.workspace = UPGDB
    env.overwriteOutput = True

    # Join each centroid to its closest zone within MaxDist.
    centroids = os.path.join(db_path, db_name, UPConfig['BaseGeom_cent'])
    zones = os.path.join(db_path, db_name, ZonalDataset)
    arcpy.SpatialJoin_analysis(centroids, zones, "in_memory/intersect",
                               "JOIN_ONE_TO_ONE", "KEEP_ALL", "", "CLOSEST",
                               MaxDist)

    # Persist the crosswalk, replacing any previous copy.
    if arcpy.Exists('upa_ZoneXWalk'):
        arcpy.Delete_management('upa_ZoneXWalk')
    arcpy.TableToTable_conversion('in_memory/intersect', UPGDB,
                                  'upa_ZoneXWalk')

    # Keep only the base-geometry ID and the zone field.
    keep = (UPConfig['BaseGeom_id'], ZoneField)
    drop = [f.name for f in arcpy.ListFields("upa_ZoneXWalk")
            if not f.required and f.name not in keep]
    arcpy.DeleteField_management("upa_ZoneXWalk", drop)
Beispiel #10
0
def AddressCheck(table, Workspace, addr):
    """Flag addresses containing "-", export them, then purge them.

    Adds a "FieldError" TEXT field to *table*, marks each row ERROR or
    None depending on whether the value of *addr* contains a hyphen,
    exports the flagged rows to Address_Errors.dbf in *Workspace*, and
    finally deletes the flagged rows from *table*.
    """
    arcpy.AddField_management(table, "FieldError", "TEXT")
    rows = arcpy.UpdateCursor(table)
    check = False
    try:
        for row in rows:
            if "-" in row.getValue(addr):
                check = True
                row.FieldError = "ERROR"
            else:
                row.FieldError = "None"
            rows.updateRow(row)
    except Exception:
        # Best-effort, as in the original (narrowed from a bare except):
        # e.g. a null address raises TypeError; keep whatever rows were
        # flagged so far.
        pass
    finally:
        del rows  # always release the cursor / schema lock

    if check:  # idiomatic truth test instead of "== True"
        arcpy.AddMessage("Field errors found!")
    expression = arcpy.AddFieldDelimiters(table, "FieldError") + " = 'ERROR'"
    arcpy.TableToTable_conversion(table, Workspace, "Address_Errors.dbf",
                                  expression)
    # Remove the flagged rows from the source table.
    rows = arcpy.UpdateCursor(table)
    for row in rows:
        if row.getValue("FieldError") == "ERROR":
            rows.deleteRow(row)
    del rows
Beispiel #11
0
def createSumTbl(table):
    """Build area and province-total summary tables for the cloned DCN
    data and stamp them with a VERSION label.

    Creates <table>_area_v1 (sum of Shape_Area grouped by SUMAREA, plus
    an AREADBL field in rounded square kilometres) and
    <table>_provtot_v1 (first SUMPROV per group). Depends on
    module-level globals ``tblName`` (selects CURRENT vs PREVIOUS
    labelling), ``outFolder``, ``tblFinalSum`` and ``calcSumFldArea`` (a
    field-calculator expression) -- TODO confirm their definitions
    elsewhere in the file.
    """
    # Calculate the Attribute Fields for the Cloned DCN Data
    arcpy.Statistics_analysis(table, table + "_area_v1", [[
        "Shape_Area",
        "SUM",
    ]], "SUMAREA")
    arcpy.Statistics_analysis(table, table + "_provtot_v1", [[
        "SUMPROV",
        "FIRST",
    ]], "SUMPROV")
    arcpy.AddField_management(table + "_area_v1", "VERSION", "TEXT", "", "",
                              "50", "", "NULLABLE", "NON_REQUIRED", "")
    arcpy.AddField_management(table + "_area_v1", "AREADBL", "DOUBLE", "", "",
                              "", "", "NULLABLE", "NON_REQUIRED", "")
    # Shape_Area is in m^2; convert to whole km^2.
    arcpy.CalculateField_management(table + "_area_v1", "AREADBL",
                                    "round(!SUM_Shape_Area!/1000000, 0)",
                                    "PYTHON", "")
    arcpy.CalculateField_management(table + "_area_v1", "SUMAREA",
                                    calcSumFldArea, "PYTHON", "")
    arcpy.AddField_management(table + "_provtot_v1", "VERSION", "TEXT", "", "",
                              "50", "", "NULLABLE", "NON_REQUIRED", "")
    # Only the "current" run also exports the final summary table.
    if tblName == "tbl_temp_web_prov_tot_cur":
        arcpy.TableToTable_conversion(table + "_area_v1", outFolder,
                                      tblFinalSum)
        arcpy.CalculateField_management(table + "_area_v1", "VERSION",
                                        "'" + "CURRENT" + "'", "PYTHON")
        arcpy.CalculateField_management(table + "_provtot_v1", "VERSION",
                                        "'" + "CURRENT" + "'", "PYTHON")

    elif tblName == "tbl_temp_web_prov_tot_prev":
        arcpy.CalculateField_management(table + "_area_v1", "VERSION",
                                        "'" + "PREVIOUS" + "'", "PYTHON")
        arcpy.CalculateField_management(table + "_provtot_v1", "VERSION",
                                        "'" + "PREVIOUS" + "'", "PYTHON")
Beispiel #12
0
def patternEHSA():
    """Collect GEONUM/CATEGORY/PATTERN rows from every dated feature
    class in the aug_Oct2020 GDB, write them to patternEHSA<tdate>.csv,
    and load that CSV back into the GDB as a table.

    Relies on module-level ``tdate`` and ``getTraceback``.
    """
    try:
        arcpy.env.workspace = r"D:\data\covid\cases\aug_Oct2020\aug_Oct2020.gdb"
        newfolder = r"D:\data\covid\cases\aug_Oct2020"
        feature_classes = arcpy.ListFeatureClasses()
        # Seed with an empty, correctly-columned frame so concat works
        # even when no feature classes match.
        frames = [pd.DataFrame(columns=['GEONUM', 'CATEGORY', 'PATTERN',
                                        'DATE'])]
        for fc in feature_classes:
            if fc.title() != ('Ctyprj'):
                print(fc.title())
                # Feature class names embed the date as chars 5..12
                # (YYYYMMDD) -- assumed naming convention, TODO confirm.
                fileDate = dt.datetime.strptime(fc[5:13], '%Y%m%d')
                field_names = [
                    i.name for i in arcpy.ListFields(fc) if i.type != 'OID'
                ]
                # Open a cursor to extract results from stats table
                cursor = arcpy.da.SearchCursor(fc, field_names)
                # Create a pandas dataframe to display results
                df = pd.DataFrame(data=[row for row in cursor],
                                  columns=field_names,
                                  dtype=int)
                # .copy() avoids pandas' SettingWithCopy warning on the
                # DATE assignment below.
                df2 = df[['GEONUM', 'CATEGORY', 'PATTERN']].copy()
                df2['DATE'] = fileDate
                frames.append(df2)
        # pd.concat replaces the deprecated per-iteration DataFrame.append.
        df3 = pd.concat(frames)
        patternCSV = os.path.join(newfolder, 'patternEHSA' + tdate + ".csv")
        df3.to_csv(patternCSV, sep=',', index=None, header=1)
        arcpy.TableToTable_conversion(patternCSV, arcpy.env.workspace,
                                      os.path.basename(patternCSV).replace(
                                          '.csv',
                                          ''))  #Table To Table conversion
        return
    except Exception:
        # Narrowed from a bare except; still reports via project helper.
        getTraceback()
Beispiel #13
0
def zone(zone, raster, temp_table, outpath_final, dbf):
    """Run a zonal histogram of *raster* over *zone* and return it as a
    pandas DataFrame.

    :param zone: zone dataset (note: parameter shadows the function name,
        kept for interface compatibility)
    :param raster: raster whose values are histogrammed per zone
    :param temp_table: name for the in-memory working table
    :param outpath_final: folder for the optional on-disk DBF copy
    :param dbf: name of the optional DBF output
    :return: (att_df, start_zone) -- the histogram table as a DataFrame
        and the timestamp when processing started
    """
    start_zone = datetime.datetime.now()
    # Snap to the GAP land-use layer so cell alignment matches other runs.
    arcpy.MakeRasterLayer_management(
        r'L:\Workspace\ESA_Species\Step3\Step3_Proposal\GAP\layerfiles_use_to_change_legend_in _ArcMap\6EcolSys_landuse.lyr',
        "snap")
    arcpy.env.snapRaster = "snap"

    arcpy.CreateTable_management("in_memory", temp_table)
    temp = "in_memory" + os.sep + temp_table
    arcpy.env.overwriteOutput = True
    arcpy.gp.ZonalHistogram_sa(zone, "Value", raster, temp)
    try:
        arcpy.AddField_management(temp, 'TableID', "TEXT", "", "", "100", "",
                                  "NULLABLE", "NON_REQUIRED", "")
        arcpy.CalculateField_management(temp, "TableID", "!OBJECTID!",
                                        "PYTHON_9.3", "")
    except Exception:  # best-effort (narrowed from bare except): TableID is optional
        pass
    # Parenthesized print works in both Python 2 and 3.
    print("Completed Zonal Histogram")

    try:
        arcpy.TableToTable_conversion(temp, outpath_final, dbf)
    except Exception:  # best-effort: the on-disk DBF copy is optional
        pass
    list_fields = [f.name for f in arcpy.ListFields(temp)]
    att_array = arcpy.da.TableToNumPyArray(temp, list_fields)
    att_df = pd.DataFrame(data=att_array)

    return att_df, start_zone
Beispiel #14
0
def FCtoTXT(fc, txtloc, outtxt):
    """Export the attribute table of *fc* to a text file *outtxt* in the
    folder *txtloc* (which also becomes the workspace)."""
    env.workspace = txtloc
    arcpy.env.overwriteOutput = True
    # Go through a table view so feature classes export cleanly.
    table_view = arcpy.MakeTableView_management(fc, 'fc_view')
    arcpy.TableToTable_conversion(table_view, txtloc, outtxt)
    del table_view
    print("text file {} created".format(outtxt))
Beispiel #15
0
    def add_itm_cor_to_csv_file(self):
        """Convert the GPS CSV (WGS84 lon/lat) to ITM coordinates and
        export them as a CSV.

        Builds a point feature class from ``self.gps_file``, reprojects
        it from WGS84 (EPSG:4326) to Israel TM Grid (EPSG:2039), adds
        POINT_X/POINT_Y attributes, and writes the table to
        ``self.workspace_csv_progress``.
        """
        source_fc = self.name_gis
        # Points from the raw lon/lat columns, in WGS84.
        arcpy.management.XYTableToPoint(
            self.gps_file,
            source_fc,
            "lon",
            "lat",
            coordinate_system=arcpy.SpatialReference(4326))

        projected_fc = source_fc + '_pro'

        # Process: Project (WGS84 -> Israel TM Grid)
        arcpy.Project_management(
            in_dataset=source_fc,
            out_dataset=projected_fc,
            out_coor_system=arcpy.SpatialReference(2039),
            transform_method="WGS_1984_To_Israel_CoordFrame",
            in_coor_system=arcpy.SpatialReference(4326),
            preserve_shape="NO_PRESERVE_SHAPE",
            max_deviation="",
            vertical="NO_VERTICAL")

        # Process: Add Geometry Attributes (POINT_X / POINT_Y in metres)
        arcpy.AddGeometryAttributes_management(
            Input_Features=projected_fc,
            Geometry_Properties="POINT_X_Y_Z_M",
            Length_Unit="METERS",
            Area_Unit="",
            Coordinate_System="")

        # Process: Table To Table (export the attribute table as CSV)
        arcpy.TableToTable_conversion(projected_fc,
                                      self.workspace_csv_progress,
                                      source_fc + 'itm.csv')
Beispiel #16
0
def oracleXY2oracle(sourceLyr, tempName, targetLyr, X, Y):
    """Reload an Oracle target layer from the XY columns of a source
    table.

    Copies the source table into the temp FGDB, builds an XY event layer
    from fields *X*/*Y*, materializes it as a feature class, empties the
    target layer, and appends the new features into it. Relies on
    module-level ``oracle_hydro_sdo``, ``myPath`` and ``spatialRef``.

    (The original iterated parallel single-element lists with a manual
    index counter; ``zip`` replaces that.)
    """
    now = datetime.datetime.now()
    arcpy.AddMessage('--- Przetwarzanie zasilania Oracle [' +
                     now.strftime("%Y/%m/%d %H:%M:%S") + '] ---')

    inputs = [oracle_hydro_sdo + "\\" + sourceLyr]
    temps = [myPath + "tempGDB.gdb\\" + tempName]
    #targets = [baza4Connector + "\\" + targetLyr]
    targets = [oracle_hydro_sdo + "\\" + targetLyr]
    fieldX = [X]
    fieldY = [Y]
    events = ["tempLyr"]

    for src, tmp, tgt, fx, fy, event in zip(inputs, temps, targets, fieldX,
                                            fieldY, events):
        arcpy.AddMessage('  --> Kopiowanie tabeli tempTable' + tempName)
        arcpy.TableToTable_conversion(src, myPath + "tempGDB.gdb",
                                      "tempTable" + tempName)

        arcpy.AddMessage('  --> Tworzenie warstwy przestrzennej ' + tmp)
        arcpy.MakeXYEventLayer_management(
            myPath + "tempGDB.gdb\\tempTable" + tempName, fx, fy,
            event, spatialRef, "")
        arcpy.FeatureClassToFeatureClass_conversion(event,
                                                    myPath + "tempGDB.gdb\\",
                                                    tempName)

        arcpy.AddMessage('  --> Usuwanie danych z ' + tgt)
        arcpy.DeleteRows_management(tgt)

        arcpy.AddMessage('  --> Zasilanie danych do ' + tgt)
        arcpy.Append_management(tmp, tgt, "NO_TEST", "", "")
def fields_to_table(input_data, fields, workspace, output_table):
    '''Export selected fields of a feature class to a GDB table.

    Builds a FieldMappings object containing one FieldMap per requested
    field, then runs Table To Table with that mapping so only those
    fields land in the output.

    :param input_data: feature class in your geodatabase
    :param fields: list of field names (strings) to carry over
    :param workspace: your arcpy.env.workspace
    :param output_table: name of the output table
    '''
    fms = arcpy.FieldMappings()

    for fld in fields:
        # One field map per field, keeping the original name.
        mapping = arcpy.FieldMap()
        mapping.addInputField(input_data, fld)
        out_fld = mapping.outputField
        out_fld.name = fld
        mapping.outputField = out_fld
        fms.addFieldMap(mapping)

    # Show the assembled mapping for debugging.
    print(fms)

    arcpy.TableToTable_conversion(input_data, workspace, output_table, "", fms)
    print("converted %s to table" %(input_data))
    def test_table_to_polygon_w_grouping(self):
        '''Test Table To Polygon using Name field as the grouping Line Field'''

        Configuration.Logger.info(".....TableToPolygonTestCase.test_table_to_polygon_w_grouping")

        # Delete the output feature class if already exists
        if arcpy.Exists(self.outputPolygons) :
            arcpy.Delete_management(self.outputPolygons)

        # Note: tool fails when run with input "Name" and "Vsort" fields as params
        groupingFieldName = 'Name'
        toolOutput = arcpy.TableToPolygon_mt(self.inputTable, "DD_2", "POINT_X", "POINT_Y", self.outputPolygons, groupingFieldName, "Vsort")

        # 1: Check the expected return value
        self.assertIsNotNone(toolOutput, "No output returned from tool")
        outputOut = toolOutput.getOutput(0)
        self.assertEqual(self.outputPolygons, outputOut, "Unexpected return value from tool")
        self.assertTrue(arcpy.Exists(self.outputPolygons), "Output features do not exist or were not created")

        # Process to check tool results for Grouping
        # Step 1: Make in_memory table to get frequency of
        # (copy the input so Frequency_analysis can run against in_memory)
        inMemTable = arcpy.TableToTable_conversion(self.inputTable, "in_memory", "TableToPolygon_single_In_Mem")

        # Step 2: Get the frequency of unique "group values" in the input table
        # Get Frequency of the unique names in the input table
        freqInputTable = arcpy.Frequency_analysis(inMemTable, "in_memory\\CountOfUniqueNames", groupingFieldName, "")

        # Get Count of the unique names
        freqTableCount = arcpy.GetCount_management(freqInputTable)
        expectedFeatureCount = int(freqTableCount.getOutput(0))

        # One output polygon is expected per unique grouping value.
        polygonCount = int(arcpy.GetCount_management(self.outputPolygons).getOutput(0))
        self.assertEqual(polygonCount, expectedFeatureCount, "Expected %s features, but got %s" % (str(expectedFeatureCount), str(polygonCount)))

        return
Beispiel #19
0
def attribute_from_vector(feat, obj, field, id_index):
    """Transfer an attribute from a vector layer onto the UTTL basins.

    Intersects *feat* (basins) with *obj*, keeps -- per basin -- the
    intersected piece with the largest area, and writes that piece's
    *field* value back onto *feat* via a join on *id_index*.

    NOTE(review): uses both ``arcpy`` and a module-level ``gp`` object
    (presumably an old-style geoprocessor) for messaging -- confirm
    ``gp`` is defined elsewhere. ``map(...)`` below returns an iterator
    on Python 3; ``tuple(names)`` still works, so this is Py2/Py3 safe.
    """
    gdb_path = os.path.dirname(os.path.abspath(feat))
    arcpy.env.workspace = '{}'.format(os.path.dirname(os.path.abspath(gdb_path)))
    arcpy.env.overwriteOutput = True
    arcpy.env.qualifiedFieldNames = False

    temp_folder = r'{}\temp'.format(os.path.dirname(os.path.abspath(gdb_path)))

    if os.path.exists(temp_folder):
        gp.AddMessage('folder temp already exists')
    else:
        gp.AddMessage('Making temp folder')
        os.mkdir(temp_folder)

    # check if the 'to' field exists and if not found then add it
    if len(arcpy.ListFields(feat, field)) == 0:
        gp.AddMessage('the {} field does not exist'.format(field))
    else:
        gp.AddMessage('The {} field already exits ... Deleting'.format(field))
        arcpy.DeleteField_management(feat, field)

    gp.AddMessage('Intersect Analysis')
    arcpy.Intersect_analysis(in_features='{} #;{} #'.format(feat, obj),
                             out_feature_class='{}/UTTL_Basins_Intersect'.format(temp_folder),
                             join_attributes='NO_FID', cluster_tolerance="-1 Unknown",
                             output_type='INPUT')

    # Area of each intersected fragment, in km^2.
    arcpy.AddField_management('{}/UTTL_Basins_Intersect.shp'.format(temp_folder), "Up_Area", "DOUBLE")
    exp = "!SHAPE.AREA@SQUAREKILOMETERS!"
    arcpy.CalculateField_management('{}/UTTL_Basins_Intersect.shp'.format(temp_folder), "Up_Area", exp, "PYTHON_9.3")

    arcpy.TableToTable_conversion('{}/UTTL_Basins_Intersect.shp'.format(temp_folder), temp_folder, 'UTTL_Basins_Intersect.csv')

    field = check_arcmap_field(field)
    df_table = pd.read_csv('{}/UTTL_Basins_Intersect.csv'.format(temp_folder))[[id_index, field, 'Up_Area']]

    # Per basin, keep the fragment with the maximum intersected area.
    idx = df_table.groupby(by=id_index, sort=False)['Up_Area'].transform(max) == df_table['Up_Area']
    df_sel_table = df_table[idx]

    # Rebuild the join table as (field value, basin code as string).
    df_join_table = df_sel_table.copy()
    df_join_table['Code'] = [str(i) for i in df_sel_table[id_index]]
    df_join_table.drop(labels=[id_index, 'Up_Area'], axis=1, inplace=True)
    df_join_table.columns = [field, id_index]

    df_join_table.to_csv('{}/UTTL_Basins_NewVectorAttribute.csv'.format(temp_folder))

    arcpy.AddField_management(feat, field, 'TEXT')

    # Push the dataframe into the GDB as a table named after the field.
    x = np.array(np.rec.fromrecords(df_join_table.values))
    names = map(str, df_join_table.dtypes.index.tolist())
    x.dtype.names = tuple(names)
    arcpy.da.NumPyArrayToTable(x, r'{}\{}'.format(gdb_path, field))

    # joins are annoying but you #should# be able to do it this way
    # joins must be performed on a Layer or Table View object...
    arcpy.MakeFeatureLayer_management(feat, 'Layer')
    arcpy.AddJoin_management('Layer', id_index, r'{}\{}'.format(gdb_path, field), id_index)
    arcpy.CalculateField_management('Layer', field, '!{}.{}!'.format(field, field), 'PYTHON_9.3')
    gp.AddMessage('attribute {} added successfully'.format(field))
Beispiel #20
0
def Export_Level_1(output_workspace, flowline_points, xs, xs_points, features):
    """Export Level-1 analysis tables to CSV in an archive folder.

    Creates <parent>/exports/<workspace name>/ next to *output_workspace*
    and writes one CSV per provided (truthy) input table. The original
    repeated the same TableToTable call in four near-identical branches;
    this version drives a single loop from a (table, name) list.

    :param output_workspace: path to the .gdb; its basename labels the
        archive subfolder
    :param flowline_points: table exported as flowline_points.csv
    :param xs: table exported as <its basename>.csv
    :param xs_points: table exported as <its basename>.csv
    :param features: table exported as features.csv
    """
    # Set environment variables
    arcpy.env.overwriteOutput = True

    # Create the output folder
    parent_folder = os.path.dirname(output_workspace)
    output_workspace_basename = os.path.splitext(os.path.basename(output_workspace))[0]
    exports_folder = os.path.join(parent_folder, "exports")
    archive_folder = os.path.join(exports_folder, output_workspace_basename)

    arcpy.AddMessage("parent_folder: {}".format(parent_folder))
    arcpy.AddMessage("output_workspace_basename: {}".format(output_workspace_basename))
    arcpy.AddMessage("exports_folder: {}".format(exports_folder))
    arcpy.AddMessage("archive_folder: {}".format(archive_folder))

    for folder in (exports_folder, archive_folder):
        if not os.path.exists(folder):
            os.makedirs(folder)
            arcpy.AddMessage("Created folder: {}".format(folder))

    def _csv_name(table):
        # "<basename>.csv" derived from a table path
        return "{}.csv".format(os.path.splitext(os.path.basename(table))[0])

    # (table, output csv name) pairs; falsy tables are skipped.
    exports = [(flowline_points, "flowline_points.csv"),
               (xs, _csv_name(xs) if xs else None),
               (xs_points, _csv_name(xs_points) if xs_points else None),
               (features, "features.csv")]

    for table, csv_name in exports:
        if table:
            arcpy.TableToTable_conversion(in_rows = table,
                                          out_path = archive_folder,
                                          out_name = csv_name)
Beispiel #21
0
def BackupAttachmentsData(AttachmentTable,
                          Database=r"C:\Users\jamesd26.NETID\Desktop\GitHub\CampusEngineering\Python\Confidence Tests\Backup.gdb"):
    '''
    Create a date-stamped backup copy of an attachment table.

    :param AttachmentTable: the attachment table to back up
    :param Database: target geodatabase; defaults to the previously
        hard-coded local Backup.gdb so existing callers are unaffected,
        but can now be pointed elsewhere.
    '''
    # Output name looks like Attachments_2024_01_31.
    OutTable = 'Attachments_' + str(datetime.date.today()).replace('-', '_')
    arcpy.TableToTable_conversion(AttachmentTable, Database, OutTable)
Beispiel #22
0
def create_braut_field():
    """Attach the BRAUT field to OutputBrautTemp by joining the staged
    ID table (InputFileTID) on OBJECTID = File_ID, then clean up."""
    # Stage the ID table in the file GDB so JoinField can consume it.
    arcpy.TableToTable_conversion(InputFileTID, env.workspace, "temp_id_table")
    # Reference the table by full path rather than the tool's return
    # value (unreliable here -- known tool bug noted by the author).
    staged_table = env.workspace + "/temp_id_table"
    arcpy.JoinField_management(OutputBrautTemp, "OBJECTID", staged_table,
                               "File_ID", ["BRAUT"])
    # Delete the temporary table once the join has copied BRAUT over.
    arcpy.Delete_management(staged_table)
Beispiel #23
0
def convertDataset(dataElementType, sourceTable, workspace, targetName,
                   whereClause):
    """Copy *sourceTable* into *workspace* as *targetName*, filtered by *whereClause*.

    Dispatches on the data element type: tables go through
    TableToTable_conversion, feature classes through
    FeatureClassToFeatureClass_conversion. Any other type is a no-op.
    """
    if dataElementType == "DETable":
        arcpy.TableToTable_conversion(sourceTable, workspace, targetName,
                                      whereClause)
    elif dataElementType == "DEFeatureClass":
        arcpy.FeatureClassToFeatureClass_conversion(sourceTable, workspace,
                                                    targetName, whereClause)
Beispiel #24
0
def copy():
    """Copy the germane feature classes and tables from master_gdb into trans_gdb.

    Each dataset keeps its original name in the target geodatabase.
    """
    env.workspace = master_gdb

    for fc_name in germane_FCs:
        arcpy.FeatureClassToFeatureClass_conversion(fc_name, trans_gdb, fc_name)

    for table_name in germane_tables:
        arcpy.TableToTable_conversion(table_name, trans_gdb, table_name)
Beispiel #25
0
def mergetables():
    """Convert each element shapefile to a workspace table, then merge all tables.

    The merged output is written to ``elementRecords`` in the current workspace.
    """
    for src_name, table_name in zip(elementShapefiles, elementTables):
        src_path = os.path.join(env.workspace, src_name)
        arcpy.TableToTable_conversion(src_path, env.workspace, table_name)

    merged_output = os.path.join(env.workspace, "elementRecords")
    arcpy.Merge_management(elementTables, merged_output)
    def execute(self, parameters, messages):
        """Buffer a selected garage and export the campus buildings it reaches.

        Parameters (by index): 0 output folder, 1 new GDB name, 2 campus GDB
        path, 3 garage X/Y table, 4 garage layer name, 5 garage Name to
        select, 6 buffer distance. Writes the intersecting buildings to a
        hard-coded CSV path. Raises arcpy.ExecuteError if the garage is
        not found.
        """
        # create the output file geodatabase
        folder = parameters[0].valueAsText
        name = parameters[1].valueAsText
        arcpy.CreateFileGDB_management(folder, name)
        gdb_path = folder + '\\' + name

        # create garages point layer from the X/Y table, load it into the GDB
        garage_location = parameters[3].valueAsText
        garage_shp_name = parameters[4].valueAsText
        garages = arcpy.MakeXYEventLayer_management(garage_location, 'X', 'Y', garage_shp_name)
        arcpy.FeatureClassToGeodatabase_conversion(garages, gdb_path)
        garage_path = gdb_path + '\\' + garage_shp_name

        # copy the campus Structures feature class into the new GDB
        # (raw strings avoid invalid-escape SyntaxWarnings for '\S', '\g', ...)
        campus_gdb_path = parameters[2].valueAsText
        structures = campus_gdb_path + r'\Structures'
        campus_buildings = gdb_path + r'\campus_building'
        arcpy.Copy_management(structures, campus_buildings)

        # reproject garages into the buildings' spatial reference
        projection = arcpy.Describe(campus_buildings).spatialReference
        arcpy.Project_management(garage_path, gdb_path + r'\garage_projected', projection)
        garage_projected = gdb_path + r'\garage_projected'

        # get building to buffer and buffer distance
        garage_selection = parameters[5].valueAsText
        buffer_distance = float(parameters[6].valueAsText)

        # make sure the requested garage exists
        where = "Name = '%s'" % garage_selection
        cursor = arcpy.SearchCursor(garage_projected, where_clause=where)
        shouldProceed = any(row.getValue('Name') == garage_selection for row in cursor)
        del cursor  # release the cursor's lock on the dataset

        if shouldProceed:
            # generate the name for the buffer layer
            garage_buff = r'\garage_%s_buffed_%s' % (garage_selection, buffer_distance)

            # BUG FIX: the original concatenated gdb_path + 'building_%s' with
            # no separator, producing a sibling path like "...gdbbuilding_X".
            garageFeature = arcpy.Select_analysis(
                garage_projected, gdb_path + r'\building_%s' % garage_selection, where)

            # buffer the selected garage
            garage_buffered = arcpy.Buffer_analysis(
                garageFeature, gdb_path + garage_buff, buffer_distance)

            # intersect the garage buffer with the campus buildings
            # BUG FIX: the original referenced '\campus_buildings', but the
            # copy above was created as 'campus_building'.
            arcpy.Intersect_analysis(
                [gdb_path + garage_buff, campus_buildings],
                gdb_path + r'\garage_building_intersection', 'All')

            # export the intersection table to CSV
            # BUG FIX: the original appended '.dbf' to the GDB table name;
            # datasets inside a file geodatabase carry no file extension.
            arcpy.TableToTable_conversion(
                gdb_path + r'\garage_building_intersection',
                'C:\\Users\\Eileen\\Documents\\lab 5', 'nearbyBuildings.csv')
        else:
            messages.addErrorMessage('garage not found')
            raise arcpy.ExecuteError
        return
Beispiel #27
0
def leerCsv(tabla):
    """Load a coordinate CSV into the GDB and build a point feature class from it.

    The table name is derived from the file name (text before the first "_").
    Returns the CopyFeatures result for the new feature class.
    """
    nameTb = tabla.split("\\")[-1].split("_")[0]
    # Stage the CSV as a GDB table, then adjust its coordinate fields.
    staged = arcpy.TableToTable_conversion(tabla, pathgdb, "TB_" + nameTb)
    modifyCoords(staged)
    # Build an XY event layer (WGS84) and persist it as a feature class.
    event_layer = arcpy.MakeXYEventLayer_management(
        staged, "X", "Y", os.path.join(pathgdb, "TB_" + nameTb),
        arcpy.SpatialReference(4326))
    return arcpy.CopyFeatures_management(event_layer, os.path.join(pathgdb, nameTb))
def mergeDuplicatesNoHost(tableName, workspace):
    '''Takes the table of Expanded DCA values,
    where every row represents a unique record
    and combines the TPA for duplicates resulting from
    the same DCA value but different HOST values.

    Returns the name of the merged output table
    ("<tableName>_Merged" in *workspace*).
    '''
    mergedTableName = '{}_Merged'.format(tableName)

    # Sum TPA across ALL duplicate rows per ORIGINAL_ID.
    # BUG FIX: the original built this sum in a block guarded by
    # "2 in counts or 3 in counts" and then unconditionally clobbered it with
    # a dict comprehension that kept only the LAST duplicate's TPA, so all
    # but one duplicate's contribution was silently dropped.
    duplicatDict = {}
    cursor = arcpy.da.SearchCursor(tableName, ['ORIGINAL_ID', 'TPA'],
                                   'DUPLICATE = 1')
    for original_id, tpa in cursor:
        duplicatDict[original_id] = duplicatDict.get(original_id, 0) + tpa

    if duplicatDict:
        arcpy.TableToTable_conversion(tableName, workspace, mergedTableName)
        arcpy.MakeTableView_management(
            os.path.join(workspace, mergedTableName), mergedTableName)

        # Fold each duplicate group's TPA into its base (DUPLICATE IS NULL) row.
        cursor = arcpy.da.UpdateCursor(mergedTableName, ['ORIGINAL_ID', 'TPA'],
                                       'DUPLICATE IS NULL')
        for row in cursor:
            if row[0] in duplicatDict:
                row[1] = row[1] + duplicatDict[row[0]]
            cursor.updateRow(row)

        # Remove the now-merged duplicate rows from the output.
        arcpy.SelectLayerByAttribute_management(mergedTableName,
                                                "NEW_SELECTION",
                                                'DUPLICATE = 1')
        arcpy.DeleteRows_management(mergedTableName)
        arcpy.SelectLayerByAttribute_management(mergedTableName,
                                                "CLEAR_SELECTION")
    else:
        # No duplicates: the merged table is a plain copy.
        arcpy.TableToTable_conversion(tableName, workspace, mergedTableName)
    return mergedTableName
def GDBAudit(GDBItems, GDBItemType, OutputLocation):
    """Join the GDB items table with the item-types table and export to Excel.

    Copies both system tables to the scratch GDB, drops the bulky
    Definition/Documentation fields, joins type names onto the items, and
    writes the result to *OutputLocation* via TableToExcel.
    """

    def log(message):
        # All progress messages go through the module-level writelog/logFile.
        writelog(logFile, message + "\n")

    # GDB Items to scratch table
    log("Process: GDB Items to scratch table")
    items_table = arcpy.TableToTable_conversion(
        GDBItems, arcpy.env.scratchGDB, "GDB_ITEMSTable", "", "", "")
    log("Process: GDB Items to scratch table Complete!")

    # Drop the large XML fields before joining
    log("Process: Delete Fields")
    items_table = arcpy.DeleteField_management(items_table,
                                               "Definition;Documentation")
    log("Process: Delete Fields Complete!")

    # GDB item types to scratch table
    log("Process: GDB ITem Type to Scratch Table")
    item_types_table = arcpy.TableToTable_conversion(
        GDBItemType, arcpy.env.scratchGDB, "GDB_ITEMTPYESTable", "", "", "")
    log("Process: GDB ITem Type to Scratch Table Complete!")

    # Join the type names onto the items via the Type/UUID key
    log("Process: Run Join Field GDP tool to put the GDB_ITEMS table and the GDB_ITEMTYPES table together")
    items_table = arcpy.JoinField_management(items_table, "Type",
                                             item_types_table, "UUID",
                                             "Name;ParentTypeID")
    log("Process: Run Join Field GDP tool to put the GDB_ITEMS table and the GDB_ITEMTYPES table together Complete!")

    # Export the joined table to Excel
    log("Table to Excel Conversion tool")
    arcpy.TableToExcel_conversion(items_table, OutputLocation, "NAME", "CODE")
    log("Table to Excel Conversion tool Complete!")
    log("Table Located: " + str(OutputLocation))
Beispiel #30
0
def convert_to_temp_csv(features):
	"""
		Helper that exports an ArcGIS table to a temporary csv file and returns the path.
	:param features: any table input accepted by arcpy.TableToTable_conversion
	:return: full path of the exported csv file
	"""
	# BUG FIX: tempfile.mktemp is deprecated and race-prone (the name can be
	# claimed between the call and the export). mkstemp creates the file
	# atomically; we then close and remove it because TableToTable_conversion
	# refuses to write over an existing output.
	fd, filepath = tempfile.mkstemp(suffix=".csv", prefix="arcgis_csv_table")
	os.close(fd)
	os.remove(filepath)
	folder, filename = os.path.split(filepath)
	arcpy.TableToTable_conversion(features, folder, filename)
	return filepath