Example #1
    def csvwriter(self,shape,background_points,data,grid_data):
        """Export csv from ArcGIS"""
        print("Writing csv files for analysis")
        FL = []

        field_list = arcpy.ListFields(shape)
        for field in field_list:
            f = field.name
            FL.append(f)

        fields = ';'.join(FL[2:])
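        # FL[2:] skips the first two fields (typically FID and Shape) to build the semicolon-delimited list the tool expects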

        FLbkgrd = []

        field_list_bkgrd = arcpy.ListFields(background_points)
        for field_bkgrd in field_list_bkgrd:
            f_bkgrd = field_bkgrd.name
            FLbkgrd.append(f_bkgrd)
        fields_bkgrd = ';'.join(FLbkgrd[2:])


        try:
            arcpy.ExportXYv_stats(shape, fields, "COMMA", data,
                                  "ADD_FIELD_NAMES")
            arcpy.ExportXYv_stats(background_points, fields_bkgrd, "COMMA",
                                  grid_data, "ADD_FIELD_NAMES")
        except arcpy.ExecuteError:
            print(arcpy.GetMessages(2))

        print("CSV files written")
        print("---------------------------------------------------------------")
Example #2
def SaveShpAsCSV(ShpFile, OutDir, OutputName):
    fields = arcpy.ListFields(ShpFile)
    fieldList2 = []
    for field in fields:
        if field.name != "Shape":
            fieldList2.append(str(field.name))
    out_csv = os.path.join(OutDir, OutputName + ".csv")
    try:
        if os.path.exists(out_csv):
            os.remove(out_csv)
        arcpy.ExportXYv_stats(ShpFile, fieldList2, "COMMA", out_csv,
                              "ADD_FIELD_NAMES")
    except arcpy.ExecuteError:
        print(arcpy.GetMessages())
Example #3
    def arcgis_zstat_points_analysis(self, logger=defaultLogger):

        #arcpy.ImportToolbox("Model Functions")
        arcpy.ImportToolbox(TBX_LOCATION)

        #Split points into separate files
        intersectParam1 = (
            BUFFERS_90M_FILE + ' #;' +
            (BUFFERS_FOLDER + BUFFER_FILE).format(**self.string_args) + ' #'
        )
        intersectSHP = (TEMP_GRID_FOLDER + 'intersect_lakes.shp').format(**self.string_args)
        self.string_args['ext'] = 'csv'
        dbfFile2 = (SAMPLE_DBF_FOLDER + SAMPLE_PTS_FILE).format(**self.string_args)
        self.string_args['ext'] = 'dbf'

        arcpy.Buffer_analysis(
            SAMPLE_POINTS_FILE, BUFFERS_90M_FILE, "90 Meters", "FULL", "ROUND", "NONE", ""
        )

        arcpy.Intersect_analysis(intersectParam1, intersectSHP, "ALL", "", "INPUT")

        arcpy.AddField_management(
            intersectSHP, "Zone_FID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""
        )

        arcpy.CalculateField_management(intersectSHP, "Zone_FID", "[FID]", "VB", "")
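        # Zone_FID preserves each feature's FID, which later geoprocessing would otherwise renumber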

        fields = "FID_output;SiteCode;Year;Month;Day;Source;Date_freef;DOC;CDOM;CHL;Zone_FID"

        arcpy.ExportXYv_stats(intersectSHP, fields, "COMMA", dbfFile2, "NO_FIELD_NAMES")

        arcpy.gp.toolbox = TBX_STR

        arcpy.gp.SplitLayerByAttributes(
            intersectSHP, "FID", "FID_", POINTS_SPLIT_FOLDER.format(**self.string_args)
        )

        for [band_name, band_folder] in self.band_parameters:
            print('Processing band {}'.format(band_name))
            self.string_args['band'] = band_name
            outFolder1 = (TEMP_GRID_FOLDER + 'ext_{band}').format(**self.string_args)
            outFolder2 = (TEMP_GRID_FOLDER + 'calc_{band}').format(**self.string_args)


            #Iterate through each file created when splitting points
            for iterationFile in glob(POINTS_SPLIT_FOLDER.format(**self.string_args) + '*.shp'):
                print(iterationFile)
                FID = iterationFile.split('\\')[-1].split('.')[0]
                dbfFile1 = (TEMP_DBF_FOLDER + BANDS_FILE_CALC).format(FID=FID, **self.string_args)

                arcpy.gp.ExtractByMask_sa(band_folder, iterationFile, outFolder1)
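                # Int("raster" * 0) yields a constant zero raster over the mask, used as a single zone below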
                arcpy.gp.RasterCalculator_sa("Int(\"{}\" * 0)".format(outFolder1), outFolder2)
                arcpy.BuildRasterAttributeTable_management(outFolder2, "NONE")
                arcpy.gp.ZonalStatisticsAsTable_sa(
                    outFolder2, "VALUE", outFolder1, dbfFile1, "DATA", "ALL"
                )

        logger.info('Performed zstat points analysis for scene {scene}'.format(**self.string_args))
        return None
Example #4
def task3():
    inraster_path = os.path.normcase("D:/NDVI Process/Climate layer/Prcp")
    inmask_path = os.path.normcase("D:/NDVI Parks/CalNorth/CalNorth Parks")
    output_path = os.path.normcase(
        "D:/NDVI Parks/CalNorth/near_dis_prcp_maysep")
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    inraster_file_list = glob.glob(os.path.join(inraster_path, "*.shp"))
    year_labels = [str(year) for year in range(2000, 2014)]
    inmask_file_list = glob.glob(os.path.join(inmask_path, "*.shp"))
    data_dict = dict()
    for inmask_file in inmask_file_list:
        inmask_file_name = os.path.split(inmask_file)[-1][:-4]  # park name
        local_dist_list = list()
        local_dist_list.append(inmask_file_name)
        for inraster_file in inraster_file_list:
            arcpy.Near_analysis(inmask_file, inraster_file)
            temp_txt_file = os.path.join(output_path, "dist.txt")
            arcpy.ExportXYv_stats(inmask_file, "NEAR_DIST", "space",
                                  temp_txt_file, "ADD_FIELD_NAMES")
            with open(temp_txt_file, "rb") as txt_rd:
                txt_rd.readline()
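                # the NEAR_DIST value is the third token on the data line, after the XCoord and YCoord columns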
                value = txt_rd.readline().strip().split(" ")[2]
                local_dist_list.append(value)
        data_dict[inmask_file_name] = local_dist_list

    output_file_name = "prcp_near_distance.csv"
    output_file = os.path.join(output_path, output_file_name)
    with open(output_file, "wb") as output_fd:
        output_writer = csv.writer(output_fd)
        output_writer.writerow([' '] + year_labels)
        for key in sorted(data_dict.keys()):
            output_writer.writerow(data_dict[key])
Example #5
def task7():
    input_path = os.path.normcase("D:/NDVI/New York/parks/junaug near_temp")
    output_path = os.path.normcase("D:/NDVI/New York/parks/get_near_result")
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    data_dict = dict()
    year_labels = [str(year) for year in range(2000, 2015)]
    input_file_dir_list = [
        os.path.join(input_path, directory)
        for directory in os.listdir(input_path)
        if os.path.isdir(os.path.join(input_path, directory))
    ]
    for input_directory in input_file_dir_list:  # iterate over every park
        input_directory_name = os.path.split(input_directory)[-1]  # park name
        temp_mean_list = list()
        temp_mean_list.append(input_directory_name)
        input_file_list = sorted(
            glob.glob(os.path.join(input_directory, "*.shp")))
        for input_file in input_file_list:
            temp_txt_file = os.path.join(output_path, "aidsbycacnty.txt")
            arcpy.ExportXYv_stats(input_file, "junaug", "SPACE", temp_txt_file,
                                  "ADD_FIELD_NAMES")
            with open(temp_txt_file, "rb") as txt_rd:
                txt_rd.readline()
                value = txt_rd.readline().strip().split(" ")[2]
                temp_mean_list.append(value)
        data_dict[input_directory_name] = temp_mean_list
    output_name = os.path.join(output_path, "junaug near_temp.csv")
    with open(output_name, "wb") as output_fd:
        output_writer = csv.writer(output_fd)
        output_writer.writerow([' '] + year_labels)
        for key in sorted(data_dict.keys()):
            output_writer.writerow(data_dict[key])
Example #6
def export(List, layer, folder_shp, folder_csv):
    for i in range(len(List)):  # Loop through the ecoregions, then the states
        # Loop through the unique values
        for j in range(len(List[i])):
            # Select all points in the specified ecoregion or state
            query = fieldList[i] + " = '" + List[i][j] + "'"
            print "Select Layer By Attribute for", layer + "_" + nameList[
                i] + nameCodes[i][j], "starts at", datetime.datetime.now(
                ).strftime("%I:%M:%S%p")
            arcpy.SelectLayerByAttribute_management(layer, "NEW_SELECTION",
                                                    query)

            # Copy the selection into a new shapefile
            print "Copy Features for", layer + "_" + nameList[i] + nameCodes[
                i][j], "starts at", datetime.datetime.now().strftime(
                    "%I:%M:%S%p")
            savename = folder_shp + layer + "_" + nameList[i] + nameCodes[i][
                j] + ".shp"
            arcpy.CopyFeatures_management(layer, savename)

            # Export the new shapefile to a csv
            print "ASCII export for", layer + "_" + nameList[i] + nameCodes[i][
                j], "starts at", datetime.datetime.now().strftime("%I:%M:%S%p")
            Output_ASCII_file = folder_csv + layer + "_" + nameList[
                i] + nameCodes[i][j] + ".csv"
            arcpy.ExportXYv_stats(
                savename,
                "pointid;CDL_2008;CDL_2009;CDL_2010;CDL_2011;CDL_2012;CDL_2013;CDL_2014;CDL_2015;CDL_2016;CDL_2017;CDL_2018;eco3_code;state_code",
                "COMMA", Output_ASCII_file, "ADD_FIELD_NAMES")
Example #7
def arc_spatial_join(WD, site_num_col='site', pour_dis=20, export_dir='results', catch_sites_csv = 'catch_sites.csv', catch_poly_csv = 'catch.csv'):
    # add the ArcGIS install paths so that arcpy can be imported
    import sys
    sys.path.append('C:\\Python27\\ArcGIS10.4\\Lib\\site-packages')
    sys.path.append(r'C:\Program Files (x86)\ArcGIS\Desktop10.4\arcpy')
    sys.path.append(r'C:\Program Files (x86)\ArcGIS\Desktop10.4\ArcToolbox\Scripts')
    sys.path.append(r'C:\Program Files (x86)\ArcGIS\Desktop10.4\bin')
    sys.path.append('C:\\Python27\\ArcGIS10.4\\lib')

    # Import packages
    import arcpy
    from arcpy import env
    import os

    env.workspace = WD
    final_export_dir = export_dir

    catch_poly = 'catch_del.shp'
    sites = 'sites_bound.shp'
    catch_sites_join = 'catch_sites_join.shp'

    arcpy.SpatialJoin_analysis(catch_poly, sites, catch_sites_join, "JOIN_ONE_TO_MANY", "KEEP_ALL", "", "WITHIN_A_DISTANCE", str(pour_dis + 10) + " Meters", "")

    # Remove unnecessary fields
    keep_fields = ['FID', 'Shape', 'GRIDCODE', site_num_col]
    rem_fields = [f.name for f in arcpy.ListFields(catch_sites_join)
                  if f.name not in keep_fields]

    arcpy.DeleteField_management(catch_sites_join, rem_fields)

    ############################################
    #### Export data
    arcpy.ExportXYv_stats(catch_sites_join, "GRIDCODE;" + site_num_col, "COMMA", os.path.join(final_export_dir, catch_sites_csv), "ADD_FIELD_NAMES")
    arcpy.ExportXYv_stats(catch_poly, "ID;GRIDCODE;area_m2", "COMMA", os.path.join(final_export_dir, catch_poly_csv), "ADD_FIELD_NAMES")

    ###########################################
    #### Check back in the spatial analyst license once done
    arcpy.CheckInExtension('Spatial')
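
A hypothetical invocation, assuming the workspace already contains catch_del.shp and sites_bound.shp:

arc_spatial_join(r'C:\mydata\gis', site_num_col='site', pour_dis=20,
                 export_dir=r'C:\mydata\results')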
Example #8
def task7(state, day_list, epochs):
    for epoch in epochs:
        input_path = os.path.normcase("D:/NDVI/" + state + "/parks/" + epoch +
                                      " near_temp")
        output_path = os.path.normcase("D:/NDVI/" + state +
                                       "/parks/get_near_result")
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        output_name = os.path.join(output_path, epoch + " near_temp.csv")
        if os.path.exists(output_name):
            continue
        data_dict = dict()
        year_labels = [str(year) for year in range(2000, 2015)]
        input_file_dir_list = [
            os.path.join(input_path, directory)
            for directory in os.listdir(input_path)
            if os.path.isdir(os.path.join(input_path, directory))
        ]
        for input_directory in input_file_dir_list:  # iterate over every park
            input_directory_name = os.path.split(input_directory)[-1]  # park name
            temp_mean_list = list()
            temp_mean_list.append(input_directory_name)
            input_file_list = sorted(
                glob.glob(os.path.join(input_directory, "*.shp")))
            year = 2000
            for input_file in input_file_list:
                print("task7 is processing %s of %s" % (input_file, epoch))
                input_file_year = input_file[-8:-4]
                while year < int(input_file_year):
                    temp_mean_list.append("NA")
                    year += 1
                year += 1
                temp_txt_file = os.path.join(output_path, "aidsbycacnty.txt")
                try:
                    arcpy.ExportXYv_stats(input_file, epoch, "SPACE",
                                          temp_txt_file, "ADD_FIELD_NAMES")
                    with open(temp_txt_file, "rb") as txt_rd:
                        txt_rd.readline()
                        value = txt_rd.readline().strip().split(" ")[2]
                        temp_mean_list.append(value)
                except Exception:
                    temp_mean_list.append("ERROR")
            data_dict[input_directory_name] = temp_mean_list
        with open(output_name, "wb") as output_fd:
            output_writer = csv.writer(output_fd)
            output_writer.writerow([' '] + year_labels)
            for key in sorted(data_dict.keys()):
                output_writer.writerow(data_dict[key])
Example #9
def exportToCsvs():
    '''exports every shapefile to csv'''
    env.workspace = pathlist[1]
    polylist = arcpy.ListFeatureClasses()
    for poly in polylist:
        try:
            outname = str(poly)[:-9] + ".csv"
            print outname
            arcpy.ExportXYv_stats(poly, ["SCINAME", "AREA_GEO"], "COMMA",
                                  pathlist[2] + outname, "ADD_FIELD_NAMES")

            print "Finished exporting:", outname
        except Exception:
            print "Encountered error with", outname
            print arcpy.GetMessages()
Example #10
    def get_near_distance_task2(self):
        inraster_path = os.path.normcase(
            "D:/Environment Factors/Ozone aot layer")
        inmask_path = os.path.normcase(
            os.path.join(self.state_path, self.state_name + " parks"))
        output_path = os.path.normcase(
            os.path.join(self.state_path, "near_dis_aot_aprsep"))
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        inraster_file_list = glob.glob(os.path.join(inraster_path, "*.shp"))
        year_labels = [str(year) for year in range(2000, 2014)]
        inmask_file_list = glob.glob(os.path.join(inmask_path, "*.shp"))
        data_dict = dict()
        for inmask_file in inmask_file_list:
            inmask_file_name = os.path.split(inmask_file)[-1][:-4]  # park name
            local_dist_list = list()
            local_dist_list.append(inmask_file_name)
            for inraster_file in inraster_file_list:
                try:
                    arcpy.Near_analysis(inmask_file, inraster_file)
                except arcpy.ExecuteError:
                    local_dist_list = list()
                    break
                temp_txt_file = os.path.join(output_path, "dist.txt")
                arcpy.ExportXYv_stats(inmask_file, "NEAR_DIST", "space",
                                      temp_txt_file, "ADD_FIELD_NAMES")
                with open(temp_txt_file, "rb") as txt_rd:
                    txt_rd.readline()
                    value = txt_rd.readline().strip().split(" ")[2]
                    local_dist_list.append(value)
            if local_dist_list:
                data_dict[inmask_file_name] = local_dist_list
        output_file_name = "near_distance.csv"
        output_file = os.path.join(output_path, output_file_name)
        with open(output_file, "wb") as output_fd:
            output_writer = csv.writer(output_fd)
            output_writer.writerow([' '] + year_labels)
            for key in sorted(data_dict.keys()):
                output_writer.writerow(data_dict[key])
Example #11
def spatialJoin(target_file, join_file, output_fields):
    try:
        # make XY event from a text file
        out_Layer = 'p' + target_file[1:-4]
        spRef = r"Coordinate Systems\Geographic Coordinate Systems\World\WGS 1984.prj"
        arcpy.MakeXYEventLayer_management(target_file, 'x', 'y', out_Layer,
                                          spRef)
        arcpy.CopyFeatures_management(out_Layer, 'tem.shp')

        # spatial join
        arcpy.SpatialJoin_analysis('tem.shp', join_file, 'sj_lyr')

        # export joined data
        arcpy.ExportXYv_stats('sj_lyr.shp', output_fields, 'COMMA',
                              'sj_' + target_file, 'ADD_FIELD_NAMES')
    except Exception as err:
        print(err.args[0])

    # remove unnecessary files
    os.remove('./data/sj_' + target_file[:-3] + 'txt.xml')
    arcpy.Delete_management('tem.shp')
    arcpy.Delete_management('sj_lyr.shp')
Example #12
def writePoints(dataDict, time, timeSecInterval, filePath):
    start = addTime(time, 0)

    for k, v in dataDict.items():
        rowOutput = str(v).split(' ')
        rowOutput.append(k)
        rowOutput[4] = re.findall('\d+|\D+', rowOutput[4])[1]
        rowOutput.append(start)
        rowOutput.append(arcpy.Point(float(rowOutput[0]), float(rowOutput[1])))
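        # NOTE: opening a new InsertCursor for every row works but is slow; one cursor hoisted out of the loop would be faster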
        with arcpy.da.InsertCursor(
                "tempFC", ['X', 'Y', 'Z', 'M', 'UniqueID', 'Time', 'SHAPE@'
                           ]) as insertCursor:
            insertCursor.insertRow(rowOutput)

        start = addTime(start, timeSecInterval)

    arcpy.Project_management("tempFC", "tempFCPRJ", 4326)
    arcpy.MakeFeatureLayer_management("tempFCPRJ", r'in_memory\eventLayer',
                                      "UniqueID = '" + rowOutput[4] + "'")
    arcpy.ExportXYv_stats(r'in_memory\eventLayer', "OBJECTID;UniqueID;Time",
                          "COMMA", filePath + "/" + rowOutput[4] + ".csv",
                          "ADD_FIELD_NAMES")
    arcpy.Delete_management(r'in_memory\eventLayer')
Example #13
    L3 = str(out_file)
    L4 = """ ' ) """
    L5 = '"""'
    L6 = """ ) """
    Select_PNF = L0 + L1 + L2 + L3 + L4 + L5 + L6
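    # the concatenated pieces form a complete SelectLayerByAttribute_management call string, executed via eval() below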
    eval(Select_PNF)
    print("Select_PNF Evaluated properly")
    if PNF_NF_Small_Lake_exclude == 1:
        arcpy.SelectLayerByAttribute_management(NHLD_LakeSet, "REMOVE_FROM_SELECTION", "\"Area_m2_0\" < 10000") #PNF_VertPt only larger than 1ha (10000 m2)

#------------------------------------------------------------------------------>
    #Save HUC12 PNF
    arcpy.CopyFeatures_management(NHLD_LakeSet, outfile_PNF_lakes)

    # Process: Export Feature Attribute to ASCII
    arcpy.ExportXYv_stats(outfile_PNF_lakes, "FID;Permanent_;FDate;GNIS_ID;GNIS_Name;ReachCode;FType;FCode;Longitude;Latitude;BufferID;Perim_m_0;Area_m2_0;WALA_0;PNF_HUC12;RASTERVALU;ORIG_FID;Perim_m_1;Area_m2_1;WALA_1;MBG_Width;MBG_Length;Width_m_1;MaxSimpTol;MinSimpTol;BUFF_DIST;Perim_m_S;Area_m2_S;Width_m_S", "COMMA", outfile_PNF_lakes_txt, "ADD_FIELD_NAMES")
#------------------------------------------------------------------------------>
    #Select PNF Vert select Permanent_ that match PNF Poly and use eval() to select. Issue in PNF could arise on edges where HUC12 is clipped by NHLD boundary
    UniPerm_PNF = []
    fc = outfile_PNF_lakes
    cursor = arcpy.SearchCursor(fc)
    for row in cursor:
        UniPerm_PNF.append(row.getValue('Permanent_'))
    UniPerm_PNF_str = str("','".join(UniPerm_PNF))

    L0 = """ arcpy.SelectLayerByAttribute_management(NHLD_LakeSet_VertPt, "NEW_SELECTION", """
    L1 = '"""'
    L2 = """ "Permanent_" in ('"""
    L3 = UniPerm_PNF_str
    L4 = """ ' ) """
    L5 = '"""'
Example #14
# Process: Create Points Along Lines
Points_along_Line = "D:/GullyGeoChallenge/data/Step.gdb/Points_along_Line"
arcpy.CreatePointsAlongLines_alonglines(Points_to_Line, Points_along_Line,
                                        "0.05", "VALUE", "NO_END_POINTS")

# Process: Extract Values to Points
Values_Extracted_to_Points = "D:/GullyGeoChallenge/data/Step.gdb/Values_Extracted_to_Points"
arcpy.gp.ExtractValuesToPoints_sa(Points_along_Line, tejeria_dsm_medium_tif,
                                  Values_Extracted_to_Points, "NONE",
                                  "VALUE_ONLY")

# Process: Add Field
arcpy.AddField_management(Values_Extracted_to_Points, "Distance", "FLOAT", "",
                          "", "", "", "NULLABLE", "NON_REQUIRED", "")

# Process: Calculate Field
Expression_2 = "([OBJECTID] -1) * 0.05"
arcpy.CalculateField_management(Values_Extracted_to_Points, "Distance",
                                Expression_2, "VB", "")

# Process: Export Feature Attribute to ASCII
# Change the GPS numbers in the Tej_csv parameter (1 and 3 set as standard) to
# the points chosen in the Select tool above; leave the leading 0s off the numbers.
Tej_csv = "D:/GullyGeoChallenge/data/Results.gdb/Tej_26_28.csv"
arcpy.ExportXYv_stats(Values_Extracted_to_Points, "RASTERVALU;Distance",
                      "COMMA", Tej_csv, "ADD_FIELD_NAMES")

print "Done"
########## END OF SCRIPT ##########
Example #15
import os
import glob

import arcpy
from arcpy import env
from arcpy.sa import *

env.workspace = "D:/NDVI Process/Environment/"
arcpy.env.overwriteOutput = True

if __name__ == "__main__":
    result_path = os.path.normcase("D:/NDVI Process/Result")
    target_path = os.path.normcase("D:/NDVI Process/Stats")

    year_list = range(2000, 2014)
    value_fields = [
        "GRIDCODE", "GRIDCODE_6", "GRIDCOD_12", "GRIDCOD_18", "GRIDCOD_24",
        "GRIDCOD_30", "GRIDCOD_36", "mean", "mean_1", "maysep", "maysep_1"
    ]
    #  value_fields = "GRIDCODE_12"
    for year in year_list:
        file_path = os.path.join(result_path, str(year))
        file_list = glob.glob(os.path.join(file_path, "*.shp"))
        for shp_file in file_list:
            out_file = os.path.join(target_path,
                                    os.path.split(shp_file)[-1][:-4] + ".txt")
            arcpy.ExportXYv_stats(shp_file, value_fields, "SPACE", out_file,
                                  "ADD_FIELD_NAMES")
    print("dfjdjf")
Example #16
def indicator_to_csv(OutputCSVFile, IndicatorID, GeographicLevelID, Language, DGUIDs):

    geoID = GeographicLevelID.strip().upper()[0:5]
    lng = Language.upper()

    # prepare DGUIDs if present
    dguid_str = prepare_dguids(DGUIDs)

    # select and rename fields to be exported based on language selection
    # note special characters are automatically changed to _ by arcpy
    if lng == "EN":
        d = {
            "DGUID" : "DGUID",
            "Loc" : "Location", 
            "Prov" : "Province_Territory", 
            "Value" : "Value",
            "Desc" : "Data_Comment"
        }
    elif lng == "FR":
        d = {
            "DGUID" : "IDUGD", 
            "Loc" : "Endroit", 
            "Prov" : "Province_Territoire", 
            "Value" : "Valeur", 
            "Desc" : "Commentaire" 
        }

    # Build query string
    qry = "SELECT grfi.GeographyReferenceId AS " + d["DGUID"] + ", " \
        "g.DisplayNameLong_" + lng + " AS " + d["Loc"] + ", " \
        "g.ProvTerrName_" + lng + " AS " + d["Prov"] + ", " \
        "iv.value AS " + d["Value"] + ", " \
        "nr.Description_" + lng + " AS " + d["Desc"] + ", g.Shape " \
        "FROM gis.geographyreference AS g INNER JOIN gis.geographyreferenceforindicator AS grfi " \
        "ON g.geographyreferenceid = grfi.geographyreferenceid  INNER JOIN " \
        "(select * from gis.indicator where indicatorId = " + IndicatorID + ") AS i " \
        "ON grfi.indicatorid = i.indicatorid  INNER JOIN gis.geographiclevel AS gl " \
        "ON g.geographiclevelid = gl.geographiclevelid INNER JOIN " \
        "gis.geographiclevelforindicator AS glfi ON i.indicatorid = glfi.indicatorid " \
        "AND gl.geographiclevelid = glfi.geographiclevelid INNER JOIN " \
        "gis.indicatorvalues AS iv ON iv.indicatorvalueid = grfi.indicatorvalueid " \
        "INNER JOIN gis.indicatortheme AS it ON i.indicatorthemeid = it.indicatorthemeid " \
        "LEFT OUTER JOIN gis.indicatornullreason AS nr ON iv.nullreasonid = nr.nullreasonid " \
        "WHERE g.GeographicLevelID = '" + geoID + "' "
        
    if DGUIDs != "":
        qry += "AND grfi.GeographyReferenceID IN (" + dguid_str + ") "
        
    arcpy.AddMessage(qry)
        
    # Make Query Layer from result of primary query - results in feature layer
    sr = arcpy.SpatialReference(3857) # WGS_1984_Web_Mercator_Auxiliary_Sphere
    all_polys = arcpy.MakeQueryLayer_management(input_database=csge_sde, 
        out_layer_name=os.path.join(scratch_path, "all_polys.shp"), 
        query=qry, oid_fields=d["DGUID"], 
        shape_type="POLYGON", spatial_reference=sr)
        
    # Export CSV - semi colon delimiter is used b/c many values contain commas.
    # Note: table to table tool does not support custom delimiters as of pro version 2.8.
    # Ignoring OutputCSVFile arg allows consistent filename when service is published
    arcpy.ExportXYv_stats(Input_Feature_Class=all_polys, Value_Field=list(d.values()), 
        Delimiter="SEMI-COLON", Output_ASCII_File="Export.txt", 
        Add_Field_Names_to_Output="ADD_FIELD_NAMES")
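
The prepare_dguids helper is not shown. A plausible sketch, assuming DGUIDs arrives as a comma-separated string and each ID must be quoted for the SQL IN (...) clause:

def prepare_dguids(DGUIDs):
    # hypothetical implementation: quote each ID and comma-join them
    return ",".join("'" + d.strip() + "'" for d in DGUIDs.split(",") if d.strip())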
Example #17
	out_coor_system = "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]",
	transform_method = "WGS_1984_(ITRF00)_To_NAD_1983",
	in_coor_system = "PROJCS['NAD_1983_StatePlane_Massachusetts_Mainland_FIPS_2001',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',200000.0],PARAMETER['False_Northing',750000.0],PARAMETER['Central_Meridian',-71.5],PARAMETER['Standard_Parallel_1',41.71666666666667],PARAMETER['Standard_Parallel_2',42.68333333333333],PARAMETER['Latitude_Of_Origin',41.0],UNIT['Meter',1.0]]"
)
arcpy.AddMessage("Points projected to WGS84: " + fc_Densify_VertPoints_Project)

# 6) Add XY Coordinates
arcpy.AddXY_management(
	in_features = fc_Densify_VertPoints_Project
)

# 7) Export Attribute Table to CSV
arcpy.ExportXYv_stats(
	Input_Feature_Class = fc_Densify_VertPoints_Project, 
	Value_Field = Value_Field, 
	Delimiter = "COMMA", 
	Output_ASCII_File = csv_output, 
	Add_Field_Names_to_Output = "ADD_FIELD_NAMES"
)
arcpy.AddMessage("(lon,lat) points added to field, exported as csv: " + csv_output)

# ---------------------------------------------------------------------------

# 8) Parse CSV to collect (lat,lng) pairs for each unique ID
snap_list = []
with open(csv_output, 'rb') as infile:
	snap_list_num = -1
	id = ''
	prev_id = ''
	unique_id = str(unique_id)
	reader = csv.DictReader(infile)
Example #18
def joinDEMcoverExport(DEM_gr, cellCenters, site, aggregationFactor, path,
                       sitePoly):

    import arcpy
    from arcpy import env
    env.workspace = "%s\\ChangeModeling\\%s" % (path, site)
    arcpy.env.overwriteOutput = True
    env.scratchWorkspace = "%s\\ChangeModeling\\Scratch.gdb" % path

    # spatial join to grid poly (contains cover info) from centroids (contains DEM-derived topo vars)
    AllData = arcpy.SpatialJoin_analysis(DEM_gr, cellCenters, "AllData.shp",
                                         "#", "#", "#", "CONTAINS")
    print "Topo vars joined with cell cover"

    # explicitly add Northing and Easting of centroids
    AllData = arcpy.AddField_management(AllData, "northing", "DOUBLE")
    AllData = arcpy.CalculateField_management(AllData, "northing",
                                              "!SHAPE.CENTROID.Y!",
                                              "PYTHON_9.3")
    AllData = arcpy.AddField_management(AllData, "easting", "DOUBLE")
    AllData = arcpy.CalculateField_management(AllData, "easting",
                                              "!SHAPE.CENTROID.X!",
                                              "PYTHON_9.3")
    print "Northing/Easting added"
    arcpy.AddGeometryAttributes_management(AllData, "CENTROID;CENTROID_INSIDE")
    print "Centroids (inside and out) added"

    AllData = arcpy.AddField_management(AllData, "cell_area", "DOUBLE")
    AllData = arcpy.CalculateField_management(AllData, "cell_area",
                                              "!SHAPE.AREA!", "PYTHON_9.3")
    print "area field added"
    # in order to change the name from AllData, we need to convert it.  silly ArcGIS.
    AllData_final = arcpy.FeatureClassToFeatureClass_conversion(
        AllData, env.workspace,
        "AllData_%s_%s.shp" % (site, aggregationFactor))
    print "Final dataset shapefile created"

    # export as csv
    if site == "WC":
        ending = arcpy.ExportXYv_stats(
            AllData_final, [
                "Cover1948", "Grass1948", "BareG1948", "Cover2009",
                "Grass2009", "BareG2009", "elev", "slope", "curv", "curv_plan",
                "curv_prof", "eastn", "northn", "tmi", "northing", "easting",
                "CENTROID_X", "CENTROID_Y", "INSIDE_X", "INSIDE_Y",
                "cell_area", "insol", "distridge", "heatind"
            ], "COMMA", "AllData_%s_%s.csv" % (site, aggregationFactor),
            "ADD_FIELD_NAMES")
    else:
        ending = arcpy.ExportXYv_stats(
            AllData_final, [
                "Cover1948", "Grass1948", "BareG1948", "Cover2009",
                "Grass2009", "BareG2009", "elev", "slope", "curv", "curv_plan",
                "curv_prof", "eastn", "northn", "tmi", "northing", "easting",
                "CENTROID_X", "CENTROID_Y", "INSIDE_X", "INSIDE_Y",
                "cell_area", "insol", "distridge", "heatind", "NEAR_DIST"
            ], "COMMA", "AllData_%s_%s.csv" % (site, aggregationFactor),
            "ADD_FIELD_NAMES")
    print "exported to csv"
    # and congratulations are definitely in order. :)
    print "Congratulations! You have made ArcGIS do stuff!"
Example #19
in_table = out_path + out_name
x_coords = "Longitude"
y_coords = "Latitude"
z_coords = "mean"
out_layer = "event" + str(year)
saved_layer = out_path + "event" + str(year) + ".lyr"
spRef = r"c:\Dian\United States\background\Coordinate.prj"
arcpy.MakeXYEventLayer_management(in_table, x_coords, y_coords, out_layer, spRef, z_coords)
arcpy.SaveToLayerFile_management(out_layer, saved_layer)
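# 'gp' is assumed to be an old-style geoprocessor created earlier in the script, e.g. gp = arcgisscripting.create()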
gp.FeatureClassToShapefile(saved_layer, out_path)
print "event " + str(year)
    
in_point = out_path + "event" + str(year) + ".lyr"
in_raster = in_r + "estats" + str(year)
outpoint = out_path + "Tempozone" + str(year) + ".shp"
arcpy.CheckOutExtension("Spatial")
ExtractValuesToPoints(in_point, in_raster, outpoint)
print "Tempozone" + str(year)

# here I want to export the attribute table of the shapefile
input_features = out_path+"Tempozone"+str(year)+".shp"
export_ASCII = out_path+"Tempozone"+str(year)+".txt"
arcpy.env.workspace = env.workspace
arcpy.ExportXYv_stats(input_features,["YEAR","STATECODE","COUNTYCODE","SITENUM", "LATITUDE", "LONGITUDE", "MEAN","SD", "RASTERVALU"], "COMMA", export_ASCII, "ADD_FIELD_NAMES")
print "TPOZ" +str(year)
    
   



Example #20
arcpy.CopyFeatures_management("fish_lyr", "fishnet_final.shp")
arcpy.AddField_management("fishnet_final.shp", "area", "FLOAT")
arcpy.CalculateField_management("fishnet_final.shp", "area", "!shape.area!",
                                "PYTHON")

user_soil = input(
    "Do you want to generate Soil Parameter File?\n (Type 1 for YES and 0 for NO:)"
)

if user_soil == 1:
    print "Preparing soil parameter file, dont open any files until it completes... \n"
    arcpy.ExcelToTable_conversion(soil_appendix, "soil_app1")
    arcpy.AddJoin_management("fish_lyr", "SOIL", "soil_app1", "SOIL_CLASS")
    cols = [a.name for a in arcpy.ListFields("fish_lyr")]
    cols_to_take = [cols[0]] + cols[3:5] + cols[6:9] + cols[12:]
    arcpy.ExportXYv_stats("fish_lyr", cols_to_take, "COMMA", "temp_soil.csv",
                          "ADD_FIELD_NAMES")
    temp = pd.read_csv(workspace + "\\" + "temp_soil.csv")
    temp = temp.iloc[:, 2:]

    temp["dsmax"] = temp["FISHNET_F.SLOPE"] * temp["SOIL_APP1:KSAT_Z1"]
    temp["grid_no"] = temp['FISHNET_F.FID']
    temp = temp.drop(['FISHNET_F.FID', 'FISHNET_F.SLOPE'], axis=1)
    c1 = temp.columns.tolist()
    colord = ([c1[0]] + [c1[-1]] + c1[2:6] + [c1[-2]] +
              c1[6:20] + [c1[1]] + c1[20:-2])
    new_temp = temp[colord]
    os.chdir(workspace)

    arcpy.AddJoin_management("fish_lyr", "SOIL", "soil_app1", "SOIL_CLASS")
    cols = [a.name for a in arcpy.ListFields("fish_lyr")]
    cols_to_take = [cols[0]] + cols[3:5] + cols[6:9] + cols[12:]
Example #21
    except:
        print 'Warning: Unable to delete original grid_SWMM'

    ap.FeatureClassToFeatureClass_conversion(grid_Dir, SWMM_prep, "grid_SWMM")
    # shapefiles to combine
    to_combine = [grid_Manning, grid_Slope, grid_ISAT]
    # create list of desired fields to pass to final object
    desired = ['Manning', 'dep_stor', 'Majority', 'Slope', 'Accum', 'MEAN_IS']
    # combine desired fields into one shape file
    for shp in to_combine:
        ap.JoinField_management(grid_SWMM, 'OBJECTID', shp, 'OBJECTID', desired)

    print "\n"
    print "Success!"
    print "Check grid_SWMM is correct"
    print "\n"
    # Compare with original grid_SWMM_old in SWMM_prep_old.gdb

if 'export' in runsteps:
    print 'grid_SWMM must exist at:', grid_SWMM
    try:
        os.remove(table_SWMM)
    except OSError:
        print 'Creating new table'
    # export to csv
    keep_fields = ['Zone', 'Flow_ID', 'Accum', 'MEAN_IS', 'Slope', 'Manning',
                   'dep_stor', 'MODEL_TOP', 'COLUMN_', 'ROW']
    ap.ExportXYv_stats(grid_SWMM, keep_fields, 'COMMA', table_SWMM, 'ADD_FIELD_NAMES')

ap.CheckInExtension("Spatial")
Example #22
## Select all fishing events that are less than the upper bound distance *** I can't get this to work correctly, so I did it manually before running the code
#qry = "\"LENGTH\" <  '" + dist1 + "'
#arcpy.AddMessage("Selecting fishing events "+ qry)
#arcpy.SelectLayerByAttribute_management(v_Name_Lyr, "NEW_SELECTION", qry )

# Write the selected features to a new featureclass
#arcpy.CopyFeatures_management(v_Name_Lyr, v_Name_Selected)

# Process: Intersect
arcpy.AddMessage("Intersecting fishing with polygons...")
inFeatures = [fe, area_Poly]
arcpy.Intersect_analysis(inFeatures, v_Name_Intersect, "ALL", "", "LINE")

# Process: Buffer
arcpy.AddMessage("Buffering...")
arcpy.Buffer_analysis(v_Name_Intersect, v_Name_Buffer, dist2, "FULL", "ROUND",
                      "LIST", "Reef")

# Process: Clip
arcpy.AddMessage("Clipping...")
arcpy.Clip_analysis(v_Name_Buffer, area_Poly, v_Name_Final, "")

# Process: Export Feature Attribute to ASCII...
arcpy.AddMessage("Exporting attribute table...")
arcpy.env.workspace = out_folder_path
input_features = outLocation + "\\" + v_Name_Final
export_ASCII = fename + "_Area.csv"
arcpy.ExportXYv_stats(input_features, ["Reef", "Shape_Area"], "COMMA",
                      export_ASCII, "ADD_FIELD_NAMES")
Example #23
    def arcgis_zstat_selected_points_analysis(self, logger=defaultLogger):

        #arcpy.ImportToolbox("Model Functions")
        arcpy.ImportToolbox(TBX_LOCATION)
        arcpy.gp.toolbox = TBX_STR

        #Split points into separate files
        self.string_args['ext'] = 'dbf'
        intersectParam1 = (
            SEL_BUFFERS_90M_FILE + ' #;' + 
            (BUFFERS_FOLDER + BUFFER_FILE).format(**self.string_args) + ' #'
        )

        intersectSHP = TEMP_GRID_FOLDER.format(**self.string_args) + 'intersect_sel_lakes.shp'
        dbfFile2 = (SEL_POINTS_FOLDER + SEL_POINTS_FILE).format(**self.string_args)

        if not os.path.exists(intersectSHP):
            arcpy.Intersect_analysis(intersectParam1, intersectSHP, "ALL", "", "INPUT")

        if not os.path.exists(dbfFile2) and not os.path.exists(dbfFile2.replace('dbf','csv')):
            arcpy.AddField_management(
                intersectSHP, "Zone_FID", "LONG", "", "", "", "", "NULLABLE", "NON_REQUIRED", ""
            )
            arcpy.CalculateField_management(intersectSHP, "Zone_FID", "[FID]", "VB", "")

            arcpy.ExportXYv_stats(
                intersectSHP, "FID;SubSite;SiteCode;Count;CDOM", "COMMA",
                dbfFile2, "ADD_FIELD_NAMES"
            )

        if (not os.path.exists((SEL_SPLIT_FOLDER + 'FID_00.shp').format(**self.string_args)) 
            and not os.path.exists((SEL_SPLIT_FOLDER + 'FID_0.shp').format(**self.string_args))):

            arcpy.gp.SplitLayerByAttributes(
                intersectSHP, "FID", "FID_", SEL_SPLIT_FOLDER.format(**self.string_args)
            )

        for [band_name, band_folder] in self.band_parameters:
            self.string_args['band'] = band_name
            outFolder1 = (TEMP_GRID_FOLDER + 'ext_{band}').format(**self.string_args)
            outFolder2 = (TEMP_GRID_FOLDER + 'calc_{band}').format(**self.string_args)


            #Iterate through each file created when splitting points
            for iterationFile in glob((SEL_SPLIT_FOLDER + 'FID_*.shp').format(**self.string_args)):
                FID = iterationFile.split('\\')[-1].split('.')[0]
                dbfFile1 = (SEL_TEMP_DBF_FOLDER + SEL_BANDS_FILE_CALC).format(
                    FID=FID, **self.string_args
                )
                if not os.path.exists(dbfFile1) and not os.path.exists(dbfFile1[0:-3] + 'csv'):
                    print(dbfFile1)
                    arcpy.gp.ExtractByMask_sa(band_folder, iterationFile, outFolder1)
                    arcpy.gp.RasterCalculator_sa("Int(\"{}\" * 0)".format(outFolder1), outFolder2)
                    time.sleep(5)
                    arcpy.BuildRasterAttributeTable_management(outFolder2, "NONE")
                    arcpy.gp.ZonalStatisticsAsTable_sa(
                        outFolder2, "VALUE", outFolder1, dbfFile1, "DATA", "ALL"
                    )

        logger.info('Performed selected points analysis for scene {scene}'
            .format(**self.string_args))
        return None
Example #24
inputdir = "D:/007/tasks/T-drive_Taxi_Trajectories/AIS_Data_gdb"
outputdir = "D:/007/tasks/T-drive_Taxi_Trajectories/AIS_Data_txt"
input_features = "Broadcast"
value_field = ['OBJECTID', 'BaseDateTime', 'SOG', 'COG', 'MMSI']

files = os.listdir(inputdir)
for f in files:
    if os.path.splitext(f)[1] == '.gdb':
        # Script arguments...
        inputfile = inputdir + os.sep + f

        # =============== file name process ======================
        basename = os.path.splitext(f)[0]
        export_ASCII = outputdir + os.sep + basename + ".txt"

        if not os.path.exists(export_ASCII):
            print(inputfile)
            # Process: Raster To Other Format (multiple)...
            try:
                # Set the current workspace (to avoid having to specify the full path to the feature classes each time)
                arcpy.env.workspace = inputfile

                # Process: Export Feature Attribute to ASCII...
                arcpy.ExportXYv_stats(input_features, value_field, 'COMMA',
                                      export_ASCII, "ADD_FIELD_NAMES")

            except:
                # If an error occurred when running the tool, print out the error message.
                print(arcpy.GetMessages())

            print(export_ASCII)
Example #25
        def run(IFile, Ofile, Fnum, Nfield, Aout, Csvout):

            import arcpy
            import os
            import os.path
            import csv
            import time
            from arcpy import env
            from arcpy.sa import ExtractMultiValuesToPoints
            from dbfpy import dbf  # assumed source of the dbf.Dbf reader used below
            env.overwriteOutput = "True"

            # Set environment settings
            env.workspace = Ofile

            # Set local variables
            print "--------------------------------------------------------------------"
            print "Program ExtractValue2Point Starts: ", time.asctime(
                time.localtime(time.time()))
            print "--------------------------------------------------------------------"
            for x in xrange(0, int(Fnum)):
                if (Aout == "Yes"):

                    if os.path.isfile(Ofile + "/" + "selected_features" +
                                      str(x) + ".shp"):
                        print "output value of Grid ", x
                        print ""
                        inFeatures = "selected_features" + str(x) + ".shp"
                        export_ASCII = "ASCII " + str(x) + ".txt"
                        inRasterList = [[IFile, str(Nfield)]]

                        # Check out the ArcGIS Spatial Analyst extension license
                        arcpy.CheckOutExtension("Spatial")

                        # Execute ExtractValuesToPoints
                        ExtractMultiValuesToPoints(inFeatures, inRasterList,
                                                   "BILINEAR")
                        arcpy.ExportXYv_stats(inFeatures, str(Nfield), "SPACE",
                                              export_ASCII, "ADD_FIELD_NAMES")

                else:
                    if os.path.isfile(Ofile + "/" + "selected_features" +
                                      str(x) + ".shp"):
                        print "output value of Grid ", x
                        print ""
                        inFeatures = "selected_features" + str(x) + ".shp"
                        inRasterList = [[IFile, str(Nfield)]]

                        # Check out the ArcGIS Spatial Analyst extension license
                        arcpy.CheckOutExtension("Spatial")

                        # Execute ExtractValuesToPoints
                        ExtractMultiValuesToPoints(inFeatures, inRasterList,
                                                   "BILINEAR")

            if (Csvout == "Yes"):
                for x in xrange(0, int(Fnum)):
                    if os.path.isfile(Ofile + "/" + "selected_features" +
                                      str(x) + ".shp"):
                        filename = Ofile + "/selected_features" + str(
                            x) + ".dbf"
                        if filename.endswith('.dbf'):
                            print "Converting %s to csv" % filename
                            csv_fn = filename[:-4] + ".csv"
                            with open(csv_fn, 'wb') as csvfile:
                                in_db = dbf.Dbf(filename)
                                out_csv = csv.writer(csvfile)
                                names = []
                                for field in in_db.header.fields:
                                    names.append(field.name)
                                out_csv.writerow(names)
                                for rec in in_db:
                                    out_csv.writerow(rec.fieldData)
                                in_db.close()
                                print "Done..."
                        else:
                            print "Filename does not end with .dbf"
            else:
                print "-------------------------------------------"
                print "Not generating all the csv files"

            print ""
            print "the value has been added to all the grid file"
            print "the ASCII files have been generated"
            print "--------------------------------------------------------------------"
            print "Program ExtractValue2Point Ends: ", time.asctime(
                time.localtime(time.time()))
            print "--------------------------------------------------------------------"
Example #26
tejeria_dsm_medium_tif = "tejeria_dsm_medium.tif"
Value_Field = "RASTERVALU;Distance"
Delimiter = "COMMA"
Add_Field_Names_to_Output = "true"
Field_Name = "Distance"
Field_Name__2_ = "Distance"
Expression__2_ = "([OBJECTID] -1) * 0.05"
Field_Type = "FLOAT"

# Process: Select
arcpy.Select_analysis(cross_section_locations_Tejeria, Tej_Cross_Sect_39_41_, Expression)

# Process: Points To Line
arcpy.PointsToLine_management(Tej_Cross_Sect_39_41_, Tej_39_41_line, "", Sort_Field, "NO_CLOSE")

# Process: Create Points Along Lines
arcpy.CreatePointsAlongLines_alonglines(Tej_39_41_line, Tej_39_41_Points_Along_Line, Interval__units_are_in_units_of_input_, "VALUE", "NO_END_POINTS")

# Process: Extract Values to Points
arcpy.gp.ExtractValuesToPoints_sa(Tej_39_41_Points_Along_Line, tejeria_dsm_medium_tif, Tej_39_41_Extracted_Point_Values, Interpolate_values_at_the_point_locations, "VALUE_ONLY")

# Process: Add Field
arcpy.AddField_management(Tej_39_41_Extracted_Point_Values, Field_Name, Field_Type, "", "", "", "", "NULLABLE", "NON_REQUIRED", "")

# Process: Calculate Field
arcpy.CalculateField_management(Tej_39_41_Extracted_Point_Values__2_, Field_Name__2_, Expression__2_, "VB", "")

# Process: Export Feature Attribute to ASCII
arcpy.ExportXYv_stats(Tej_39_41_Extracted_Point_Values__3_, Value_Field, Delimiter, Tej_39_41_csv, Add_Field_Names_to_Output)

Example #27
year = 1980

# select layer by location
arcpy.MakeFeatureLayer_management(in_path + "ests" + str(year) + ".shp",
                                  'elyr' + str(year))
arcpy.SelectLayerByLocation_management('elyr' + str(year), 'intersect',
                                       in_path + "Urbanized_area" + ".shp")
arcpy.CopyFeatures_management("elyr" + str(year),
                              out_path + 'usits' + str(year))
print "usits " + str(year)

#export usits attribute to txt
input_features = out_path + "usits" + str(year) + ".shp"
export_ASCII = "usits" + str(year) + ".txt"
arcpy.ExportXYv_stats(input_features, "YEAR", "SPACE", export_ASCII,
                      "ADD_FIELD_NAMES")

gp = arcgisscripting.create()
output = open(r + str(year) + '.csv', 'w')
linewriter = csv.writer(output, delimiter=',')
fcdescribe = gp.Describe(out_path + 'usits' + '.shp')
flds = fcdescribe.Fields

header = []
for fld in flds:
    value = fld.Name
    header.append(value)
linewriter.writerow(header)

cursor = gp.SearchCursor(out_path + 'usits' + '.shp')
row = cursor.Next()
Example #28
# Add quotes to raster field
RasterExpression = "AddQuotes(!" + RasterField + "!)"
arcpy.CalculateField_management(Reference_Points_Extract, RasterField,
                                RasterExpression, "PYTHON", codeBlock)

# Add and calculate a value field
ValueField = "Count"
ValueExpression = 1
arcpy.AddField_management(Reference_Points_Extract, ValueField, "Long")
arcpy.CalculateField_management(Reference_Points_Extract, ValueField,
                                ValueExpression, "PYTHON")

# Export raster field and reference field to csv
Value_Field = "\"" + ReferenceField + ";" + RasterField + ";" + ValueField + "\""
arcpy.ExportXYv_stats(Reference_Points_Extract, Value_Field, "COMMA",
                      Reference_CSV, "ADD_FIELD_NAMES")

# Read output CSV into data frame
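# Note: the exported file carries leading coordinate columns, and the headers come back upper-cased, hence the .upper() lookups below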
dataFrame = pandas.read_csv(Reference_CSV)
dataFrame.head()

# Generate pivot table from data frame
pivotTable = pandas.pivot_table(dataFrame,
                                index=RasterField.upper(),
                                columns=[ReferenceField.upper()],
                                values=ValueField.upper(),
                                aggfunc=numpy.sum,
                                fill_value="")

# Generate an excel file to store pivot table
writer = pandas.ExcelWriter(Pivot_Table, engine='xlsxwriter')
Example #29
    ## Process: Resample apriori raster - https://desktop.arcgis.com/en/arcmap/10.3/tools/data-management-toolbox/resample.htm
    print 'Resampling basin raster...'
    arcpy.Resample_management(Basin_Raster, Basin_Raster_resamp, "0.01 0.01", "NEAREST")

    ## Process: Clip raster to basin boundary
    print 'Extracting by mask - basin boundary...'
    arcpy.Clip_management(Basin_Raster_resamp, "#", Basin_Raster_resamp_clip, Basin_Boundary, "", "ClippingGeometry", "NO_MAINTAIN_EXTENT")

    ## Process: Raster to Point
    print 'Raster to point...'
    arcpy.RasterToPoint_conversion(Basin_Raster_resamp_clip, Basin_Points, "VALUE")

    ## Process: Export Feature Attribute to ASCII
    print 'Export attributes to text...'
    arcpy.ExportXYv_stats(Basin_Points + '.shp', "GRID_CODE", "COMMA", Out_text, "ADD_FIELD_NAMES")

if snow == 'on':
    # list all Snow17 grids (directories containing data only), ignoring extra files in the directory
    all_snow = [name for name in os.listdir(SNOW17_Grids_Folder) if os.path.isdir(os.path.join(SNOW17_Grids_Folder, name))]
    all_snow.remove('info')  # remove the info directory from the list of variables
    for variable in all_snow:
        print ch5id + ' --> ' + variable
        Out_text = output_dir + '\\' + ch5id + '\\' + ch5id + '_' + variable + '.txt'

        ## Local variables:
        RASTER = SNOW17_Grids_Folder + '\\' + variable
        #Basin_Raster = 'P:\\NWS\\GIS\\Models\\10_0_tools\\Model_Output\\' + variable
        #Basin_Points = 'P:\\NWS\\GIS\\Models\\10_0_tools\\Model_Output\\' + variable + '_points'
        Basin_Raster = 'C:\\NWS\\python\\temp_output\\x' + ch5id
        Basin_Points = 'C:\\NWS\\python\\temp_output\\' + ch5id + '_points'
Example #30
                                        fallowQuery)

# Copy selection to a new shapefile
fallowcrops = "H:\\CDL_RGB\\usa_albers\\output_data\\fallow_cropland\\fallowcrops.shp"
arcpy.CopyFeatures_management("point_lyr", fallowcrops)

print "Copy Features: Fallow Cropland completed at", datetime.datetime.now(
).strftime("%I:%M:%S%p")

print "Fallow Cropland export to ASCII starts at", datetime.datetime.now(
).strftime("%I:%M:%S%p")

# Export the attribute table to CSV
Output_ASCII_file = "H:\\CDL_RGB\\usa_albers\\output_data\\fallow_cropland\\csvs\\fallowcrops.csv"
arcpy.ExportXYv_stats(
    fallowcrops,
    "pointid;CDL_2008;CDL_2009;CDL_2010;CDL_2011;CDL_2012;CDL_2013;CDL_2014;CDL_2015;CDL_2016;CDL_2017;CDL_2018;eco3_code;state_code",
    "COMMA", Output_ASCII_file, "ADD_FIELD_NAMES")

print "Step 8.2 Export Fallow Cropland completed at", datetime.datetime.now(
).strftime("%I:%M:%S%p")

#------------------------

#### 8.3 EXPORT FALLOW CROPLAND, AND CROPLAND BASED ON ECOREGION AND STATE
##
# Inputs:  ExCrop2018
# Outputs: one shapefile and one csv for each ecoregion and state (to include the 99 values) for both fallow/idle and cropland

print "\nStep 8.3. Export to csv Based on Ecoregion and State starts at", datetime.datetime.now(
).strftime("%I:%M:%S%p")