Example #1
0
def generate_point(shape_file):
    """Create per-feature test points for every record in *shape_file*.

    For each feature, points are either generated at random inside the
    feature (when CheckBox_pointfile == 'false') or subset from the
    existing Point_file layer.  All per-feature outputs are merged into
    Save_file at the end.
    """
    outputs_to_merge = []
    for fid, sample_count in arcpy.da.SearchCursor(shape_file,
                                                   ['FID', 'SampleSize']):
        where_clause = """{0} = {1}""".format('FID', fid)
        base_name = 'FID' + str(fid)
        # Isolate the single feature to constrain point placement.
        constraining = arcpy.Select_analysis(in_features=shape_file,
                                             out_feature_class=base_name,
                                             where_clause=where_clause)
        if CheckBox_pointfile == 'false':
            # No point file supplied: scatter random points in the feature.
            arcpy.AddMessage('Creating random points for features...')
            result_name = base_name + '_testpoints'
            arcpy.CreateRandomPoints_management(
                out_path=Workspace,
                out_name=result_name,
                constraining_feature_class=constraining,
                number_of_points_or_field=sample_count,
                minimum_allowed_distance='1 Meter',
                create_multipoint_output='POINT')
            outputs_to_merge.append(result_name + '.shp')
        else:
            # Point file supplied: pick the points falling inside the feature.
            arcpy.AddMessage('Selecting random points for features...')
            selected = arcpy.SelectLayerByLocation_management(
                in_layer=Point_file,
                overlap_type='INTERSECT',
                select_features=base_name + '.shp',
                selection_type='NEW_SELECTION')
            result_name = base_name + '_testpoints'
            arcpy.SubsetFeatures_ga(in_features=selected,
                                    out_training_feature_class=result_name,
                                    size_of_training_dataset=sample_count,
                                    subset_size_units='ABSOLUTE_VALUE')
            outputs_to_merge.append(result_name + '.shp')
    arcpy.AddMessage('Merging files...')
    arcpy.Merge_management(outputs_to_merge, Save_file)

    arcpy.AddMessage('Points generated')
Example #2
0
def main_task(source_features, comparative_features, source_grid, output_grid,
              output_features, distance, sample_size):
    """Run the full positional-offset comparison workflow.

    Samples *sample_size* percent of *source_features*, projects the sample
    and *comparative_features* to a common coordinate system, computes a
    per-feature positional offset (multiprocessed when more than one core is
    available), then scores *output_grid* cells from the projected results.
    Writes results into *output_features* and *output_grid*.
    """
    # begin timer
    # NOTE(review): time.clock() was removed in Python 3.8 — if this targets
    # ArcGIS Pro (Python 3.x), switch to time.perf_counter().
    t0 = time.clock()

    # setup workspace: clear leftovers from a previous run, then ensure the
    # folder exists.
    scratch_workspace = env.scratchFolder
    # BUGFIX: os.listdir() returns a list, which never equals '' — the old
    # test `not os.listdir(...) == ''` was always true, so the workspace was
    # unconditionally deleted.  Only clean it when it actually has contents.
    if os.listdir(scratch_workspace):
        log.info('Cleaning scratch workspace...')
        shutil.rmtree(scratch_workspace)
    if not os.path.exists(scratch_workspace):
        os.mkdir(scratch_workspace)

    log.info('Creating output feature class...')
    output_gdb, output_fc_name = create_gdb(output_features)
    create_result_features(source_grid, output_grid)

    # select sample features
    t1 = time.clock()
    log.info('Selecting samples...')
    sample_source_features = 'in_memory/sample_features'
    arcpy.SubsetFeatures_ga(source_features, sample_source_features, '',
                            sample_size, "PERCENTAGE_OF_INPUT")
    #sample_source_features = select_feature_sample(source_features, comparative_features, distance, sample_size)
    log.info('Selected {0} sample features...'.format(
        int(arcpy.GetCount_management(sample_source_features).getOutput(0))))
    log.info('Selection took {0} minutes to run'.format(
        (time.clock() - t1) / 60))

    # project features to the common spatial reference `sr` (module global)
    log.info('Projecting data to common coordinate system...')
    proj_comparative_features = '{0}/{1}'.format(env.scratchGDB,
                                                 'proj_comparative_features')
    arcpy.Project_management(sample_source_features, output_features, sr)
    arcpy.Project_management(comparative_features, proj_comparative_features,
                             sr)

    # add output fields (skip any that already exist)
    add_fields = [['MEAN_CE90', 'DOUBLE'], ['OFFSET_METERS', 'DOUBLE'],
                  ['OSM_MATCH', 'TEXT']]
    for field in add_fields:
        if not arcpy.ListFields(output_features, field[0]):
            arcpy.AddField_management(output_features, field[0], field[1])

    # perform comparison — fan out over cores when more than one is available
    log.info('Performing positional offset calculations...')
    t2 = time.clock()
    sample_oids = get_oids(output_features)
    if multiprocessing.cpu_count() > 1:
        log.info('Using {0} cores.'.format(multiprocessing.cpu_count()))
        results = multiprocess_positional_offset(sample_oids, output_features,
                                                 proj_comparative_features,
                                                 distance)
    else:
        log.info('Only 1 core available. Not using multiprocessing.')
        results = positional_offset(sample_oids, output_features,
                                    proj_comparative_features, distance)

    # update features with results
    update_with_results(output_features, ['OFFSET_METERS', 'OSM_MATCH'],
                        results)
    log.info('Positional offset calculations took {0} minutes to run'.format(
        (time.clock() - t2) / 60))

    # cleanup extra fields: keep only the added fields plus the configured
    # input field name (module global `in_fields`, both cases)
    log.info('Cleaning up output fields...')
    fields_to_keep = [field[0] for field in add_fields
                      ] + [in_fields, in_fields.upper()]
    fields_to_delete = make_delete_lines_list(
        arcpy.ListFields(output_features), fields_to_keep)
    if len(fields_to_delete):
        arcpy.DeleteField_management(output_features, fields_to_delete)

    # project to Equidistant Cylindrical (world), WKID 54002
    proj_output_features = '{0}/{1}'.format(env.scratchGDB,
                                            'proj_output_features')
    arcpy.Project_management(output_features, proj_output_features,
                             arcpy.SpatialReference(54002))

    # perform grid scoring
    log.info('Performing grid scoring...')
    t3 = time.clock()
    grid_oids = get_oids(output_grid)
    if multiprocessing.cpu_count() > 1:
        log.info('Using {0} cores.'.format(multiprocessing.cpu_count()))
        results = multiprocess_score(grid_oids, output_grid,
                                     proj_output_features)
    else:
        log.info('Only 1 core available. Not using multiprocessing.')
        results = score(grid_oids, output_grid, proj_output_features)

    # update features with results
    update_with_results(output_grid,
                        ['MEAN_OFFSET_METERS', 'PO_SCORE', 'TIER'], results)
    log.info('Grid scoring took {0} minutes to run'.format(
        (time.clock() - t3) / 60))

    # cleanup intermediate projected datasets
    arcpy.Delete_management(proj_comparative_features)
    arcpy.Delete_management(proj_output_features)
    copy_ce(output_features)

    # stop timer
    t4 = (time.clock() - t0) / 60
    log.info('This process took ' + str(t4) +
             ' minutes to run the entire process')
Example #3
0
# Keep only the "class" field on all_poi: add it, then delete every other
# non-required field.
arcpy.AddField_management(all_poi, "class", "LONG")
fields0 = [c.name for c in arcpy.ListFields(all_poi) if not c.required]
fields0.remove("class")
##print fields0
arcpy.DeleteField_management(all_poi, fields0)

##//////////////////PREPARING TRAIN AND VALIDATION DATA////////////////////////
arcpy.AddMessage("PREPARING TRAIN AND VALIDATION DATA")
arcpy.AddMessage("Clipping landslides")
# Clip the landslide polygons (hey_ham) to the study area.
arcpy.Clip_analysis(hey_ham, area, "hey.shp")
hey = "hey.shp"
fields1 = [c.name for c in arcpy.ListFields(hey)]
# First field of the clipped shapefile, used as the raster value field below.
field1 = fields1[0]
arcpy.AddMessage("landslide pixels is converting to point")
# Split landslides into training/test shapefiles; split_size is the
# percentage kept for training (the original comment says 70/30 — depends
# on the split_size value, defined elsewhere).
arcpy.SubsetFeatures_ga(
    hey, "train_1.shp", "test_1.shp", split_size,
    "PERCENTAGE_OF_INPUT")  ### split landslides into train and test sets
# Rasterize both splits at cell_size, then convert the rasters to points so
# each landslide cell becomes one sample point.
hey_ras = arcpy.PolygonToRaster_conversion("train_1.shp", field1, "hey_ras",
                                           "CELL_CENTER", "", cell_size)
hey_ras_test = arcpy.PolygonToRaster_conversion("test_1.shp", field1,
                                                "hey_ras_test", "CELL_CENTER",
                                                "", cell_size)
hey_poi = arcpy.RasterToPoint_conversion("hey_ras", "hey_poi.shp", "VALUE")
hey_poi_test = arcpy.RasterToPoint_conversion("hey_ras_test",
                                              "hey_poi_test.shp", "VALUE")

##///////////////////////////////TRAIN DATA////////////////////////////////////
# Label training points and strip every non-required field except "class".
arcpy.AddField_management("hey_poi.shp", "class", "SHORT")
fields2 = [c.name for c in arcpy.ListFields("hey_poi.shp") if not c.required]
fields2.remove("class")
# NOTE(review): this call is truncated in the excerpt; its remaining
# arguments are outside the visible source.
arcpy.DeleteField_management("hey_poi.shp",
Example #4
0
ap.env.overwriteOutput = True

# Script-tool parameters.
inputfc = ap.GetParameterAsText(0)   # input feature class to subset
wd = ap.GetParameterAsText(1)        # working directory for outputs
SubSize = ap.GetParameterAsText(2)   # subset size (percent of input)
SubsetsN = ap.GetParameterAsText(3)  # number of subsets to create

# Spatial coordinate system and transformation.
ap.env.outputCoordinateSystem = ap.SpatialReference(2193)  #NZTM code
# NOTE(review): "NGD_2000" in the second transformation name looks like a
# typo for "NZGD_2000" — confirm against the installed transformation list.
ap.env.geographicTransformations = "NZGD_2000_To_WGS_1984_1; New_Zealand_1949_To_NGD_2000_3_NTv2"

# Create the output GDB if there is no pre-existing one; either way
# outgdb_path ends up pointing at the workspace GDB.
gdb_path = wd + "\\SubsetPatch.gdb"
if not ap.Exists(gdb_path):  # idiom fix: was `== False`
    outgdb_path = ap.CreateFileGDB_management(wd, "SubsetPatch.gdb")
else:
    outgdb_path = gdb_path

# Replication: draw SubsetsN independent subsets of SubSize percent of the
# input and export each one to Excel.
ap.env.workspace = outgdb_path
#SubSize = 25        #size of the training feature class subset, can be randomized
for incValue in range(int(SubsetsN)):  # idiom fix: was a manual while-counter
    Patch = ap.SubsetFeatures_ga(
        inputfc, gdb_path + "\\PatchSubset_" + str(incValue),
        "", SubSize, "PERCENTAGE_OF_INPUT")
    ap.TableToExcel_conversion(Patch, ("Excel\\PatchSubset_" + str(incValue)))