def row_in_feature_layer(row: pd.Series, feature_layer: FeatureLayer) -> bool:
    """Return True when the row's pin coordinates intersect ``feature_layer``.

    Rows with missing coordinates are reported as outside the layer.  The
    spatial query is retried with exponential backoff; if every attempt
    fails, a Slack alert is sent and the function falls back to True so a
    pin is never shown inside a potentially disputed area.
    """
    # Pins without coordinates can never be rendered, so they are trivially
    # outside the disputed area.
    if pd.isna(row['pin_longitude']) or pd.isna(row['pin_latitude']):
        return False

    # Point geometry at the pin, wrapped in an "intersects" geometry filter.
    pin = Point({"x": row['pin_longitude'], "y": row['pin_latitude']})
    pin_filter = intersects(pin)

    MAX_RETRIES = 9
    # Fail-safe default: treat the pin as disputed unless a query proves
    # otherwise.
    in_disputed_area = True

    attempt = 0
    while True:
        try:
            matches = feature_layer.query(geometry_filter=pin_filter).features
            in_disputed_area = len(matches) > 0
            break
        except Exception:
            # Alert and give up once the retry budget is spent; otherwise
            # back off exponentially (1.5**attempt seconds) and try again.
            if attempt > MAX_RETRIES:
                body = f'Unable to check if the record with ID {row["source_id"]} is in a disputed region.'
                send_slack_message(body, channel='#dev-logging-etl')
                break
            sleep(1.5 ** attempt)
            attempt += 1

    return in_disputed_area
def thematic_accuracy(out_sdf, df_list, f_thm_acc, them_gis, them_url):
    """Populate per-feature thematic-accuracy statistics in ``out_sdf``.

    For each row of ``out_sdf``, the DataFrame at the same index in
    ``df_list`` is summarised on the ``f_thm_acc`` field (dominant value,
    min/max and their shares, counts per map-scale bucket, mean), and a
    ``grls_score`` is read from the thematic service at ``them_url``.  The
    results are written back into ``out_sdf`` under the column names listed
    in ``field_schema['them']`` (a module-level schema defined elsewhere).

    Parameters
    ----------
    out_sdf : spatially enabled DataFrame that receives the statistics
        (mutated in place and also returned).
    df_list : list of DataFrames, one per row of ``out_sdf``.
    f_thm_acc : name of the thematic-accuracy field to summarise; validated
        against the columns of ``df_list[0]`` via ``validate_field``.
    them_gis : GIS connection used to reach the thematic service.
    them_url : URL of the thematic feature layer.

    Returns
    -------
    The mutated ``out_sdf``.
    """

    print('Running Thematic Accuracy')

    # Resolve the configured field name against the columns actually present.
    f_thm_acc = validate_field(f_thm_acc, list(df_list[0]))

    # List Used for Logging Differences in Population Sources
    # (only appended to by the commented-out GeoEnrichment section below).
    pop_diff = []

    for idx, row in enumerate(out_sdf.iterrows()):

        # DataFrame of records matching this output feature.
        df_current = df_list[idx]

        ##-----------------------------------------------------------------------------
        ## Uses Geoenrichment - Not available outside of AGOL
        # Pull GeoEnrichment Figures
        # enriched = enrich([row[1]['SHAPE']], gis=geo_gis)
        # if 'TOTPOP' not in list(enriched):
        #     enriched_pop = -1
        # else:
        #     enriched_pop = enriched.TOTPOP[0]
        #
        # # Pull Samples From Configured Population Service
        # img_lyr = ImageryLayer(img_url, gis=geo_gis)
        # cells = img_lyr.properties.maxImageHeight * img_lyr.properties.maxImageWidth
        # samples = img_lyr.get_samples(
        #     row[1]['SHAPE'],
        #     geometry_type='esriGeometryPolygon',
        #     sample_count=cells
        # )
        # sample_total = sum([int(sample['value']) for sample in samples])
        #
        # # Push Significant Values Into List for Averaging
        # if enriched_pop or sample_total < 100:
        #     pass
        # else:
        #     diff = abs(enriched_pop - sample_total)
        #     if diff > 100:
        #         pop_diff.append(diff)
        #
        # tot_pop = enriched_pop if enriched_pop > 0 else sample_total
        # tot_pop = tot_pop if tot_pop > 0 else -1

        ##-----------------------------------------------------------------------------

        them_lyr = FeatureLayer(url=them_url, gis=them_gis)

        # Shrink the footprint slightly (negative buffer) — presumably so
        # edge-touching neighbours are not picked up by the intersects
        # filter; TODO confirm intent.
        geom = Geometry(row[1].SHAPE).buffer(-.01)

        # Spatial filter in WGS84 (wkid 4326).
        sp_filter = filters.intersects(geom, 4326)

        them_sdf = them_lyr.query(geometry_filter=sp_filter,
                                  return_all_records=True).df
        #print(them_sdf)

        if len(df_current) > 0:
            # Summary statistics of the thematic-accuracy field.
            count = len(df_current)
            max_val = df_current[f_thm_acc].max()
            max_scale = 100 * (
                len(df_current[df_current[f_thm_acc] == max_val]) / count)
            min_val = df_current[f_thm_acc].min()
            min_scale = 100 * (
                len(df_current[df_current[f_thm_acc] == min_val]) / count)
            vc = df_current[f_thm_acc].value_counts()
            common = df_current[f_thm_acc].mode()  # Used in MSP
            mean = df_current[f_thm_acc].mean()
            if len(common) > 0:
                # Dominant value, its record count and its share of records.
                common = common[0]
                common_count = vc[common]
                common_per = (vc[common] / count) * 100
            else:
                # No mode (empty mode series): fall back to the minimum.
                common = min_val
                common_count = 1
                common_per = 100
            # Count how many records fall in each standard map-scale bucket.
            count_2500 = 0
            count_5000 = 0
            count_12500 = 0
            count_25000 = 0
            count_50000 = 0
            count_100000 = 0
            count_250000 = 0
            count_500000 = 0
            count_1000000 = 0
            if 2500 in vc:
                count_2500 = vc[2500]
            if 5000 in vc:
                count_5000 = vc[5000]
            if 12500 in vc:
                count_12500 = vc[12500]
            if 25000 in vc:
                count_25000 = vc[25000]
            if 50000 in vc:
                count_50000 = vc[50000]
            if 100000 in vc:
                count_100000 = vc[100000]
            if 250000 in vc:
                count_250000 = vc[250000]
            if 500000 in vc:
                count_500000 = vc[500000]
            if 1000000 in vc:
                count_1000000 = vc[1000000]

            MSP = get_msp(scale=common)  # SHOULD UPDATE MISSION_PLANNING FIELD

            # Guard against a missing/zero MEAN in the first output row.
            # NOTE(review): `m` is computed but not used below — looks like a
            # leftover from the commented-out get_equal_breaks_score path.
            if not out_sdf['MEAN'][0]:
                m = 0
            else:
                m = out_sdf['MEAN'][0]

            # GRLS score taken from the first intersecting thematic feature.
            SCORE_VALUE = them_sdf['grls_score'].loc[
                0]  #get_equal_breaks_score(m)# get_equal_breaks_score(output_features, ['MEAN','EQUAL']) # PUT SCORE IN EQUAL

            #GRLS = SCORE_VALUE
            #domScale = common
            # FIELD 1 is the source, Field 2 is the field to be updated
            #df_current['EQUAL'] = SCORE_VALUE # ASSIGNS EQUAL TO LANSCAN_SCALE
            #29 field

            # Write results into the schema-defined columns (indices 0-29).
            # NOTE(review): DataFrame.set_value was deprecated in pandas 0.21
            # and removed in 1.0 — confirm the pinned pandas version, or
            # migrate to .at/.iat.
            out_sdf.set_value(idx,
                              field_schema.get('them')[0], common)  # median
            out_sdf.set_value(idx,
                              field_schema.get('them')[1],
                              common_count)  # % common
            out_sdf.set_value(idx,
                              field_schema.get('them')[2],
                              round(common_per, 1))
            out_sdf.set_value(idx, field_schema.get('them')[3], min_val)
            out_sdf.set_value(idx,
                              field_schema.get('them')[4], round(min_scale, 1))
            out_sdf.set_value(idx, field_schema.get('them')[5], max_val)
            out_sdf.set_value(idx,
                              field_schema.get('them')[6], round(max_scale, 1))
            # Raw counts per scale bucket (indices 7-15).
            out_sdf.set_value(idx, field_schema.get('them')[7], count_2500)
            out_sdf.set_value(idx, field_schema.get('them')[8], count_5000)
            out_sdf.set_value(idx, field_schema.get('them')[9], count_12500)
            out_sdf.set_value(idx, field_schema.get('them')[10], count_25000)
            out_sdf.set_value(idx, field_schema.get('them')[11], count_50000)
            out_sdf.set_value(idx, field_schema.get('them')[12], count_100000)
            out_sdf.set_value(idx, field_schema.get('them')[13], count_250000)
            out_sdf.set_value(idx, field_schema.get('them')[14], count_500000)
            out_sdf.set_value(idx, field_schema.get('them')[15], count_1000000)
            # Percentage of records per scale bucket (indices 16-24).
            out_sdf.set_value(idx,
                              field_schema.get('them')[16],
                              round(count_2500 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[17],
                              round(count_5000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[18],
                              round(count_12500 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[19],
                              round(count_25000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[20],
                              round(count_50000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[21],
                              round(count_100000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[22],
                              round(count_250000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[23],
                              round(count_500000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[24],
                              round(count_1000000 * 100 / count, 1))
            out_sdf.set_value(idx, field_schema.get('them')[25], count)
            out_sdf.set_value(idx,
                              field_schema.get('them')[26],
                              str(MSP))  #MISSION_PLANNING FIELD
            out_sdf.set_value(idx,
                              field_schema.get('them')[27],
                              SCORE_VALUE)  #), # THEMATIC SCALE VALUE
            #out_sdf.set_value(idx, field_schema.get('them')[27], tot_pop)  # ), # THEMATIC SCALE VALUE
            out_sdf.set_value(idx,
                              field_schema.get('them')[28],
                              population_scale(
                                  common, SCORE_VALUE))  # POPULATION_SCALE
            out_sdf.set_value(idx, field_schema.get('them')[29], mean)
            #to 28

        else:
            # No matching records: sentinel values (-1 / 0 / 'N/A') for all
            # thirty schema columns.
            for i in range(0, 25):
                out_sdf.set_value(idx, field_schema.get('them')[i], -1)

            out_sdf.set_value(idx, field_schema.get('them')[25], 0)
            out_sdf.set_value(idx, field_schema.get('them')[26], 'N/A')
            out_sdf.set_value(idx, field_schema.get('them')[27], 'N/A')
            out_sdf.set_value(idx, field_schema.get('them')[28], 0)
            out_sdf.set_value(idx, field_schema.get('them')[29], -1)

        # Release the per-row frame before the next (possibly large) query.
        del df_current

    #print('Average Difference of Population Estimates: {}'.format(np.average(pop_diff)))

    return out_sdf
Пример #3
0
)
# Layer of Caltrans district boundaries, addressed by its REST endpoint.
ct_dist = arcgis.features.FeatureLayer(
    "http://services2.arcgis.com/hvBNq5JdIeoqdAq9/arcgis/rest/services/CaltransDistricts/FeatureServer/0/"
)
# Select district 7 by attribute query.
dist7 = ct_dist.query(where="DIST=7")
# NOTE(review): `gens` is created earlier in the script (its assignment is
# truncated above this chunk); it is used below as a FeatureLayer.
origRendInfo = gens.properties.drawingInfo  #good to use when finally drawing geometries!
print(type(origRendInfo))
# print(origRendInfo)
print(origRendInfo['renderer'])

# Geometry (JSON dict) of the first — presumably only — district-7 feature.
polygon = dist7.features[0].geometry
#create a geometry object from the geometry property of the json object
geomObj = arcgis.geometry.Geometry(polygon)
print(geomObj.type)

# Spatially filter `gens` to the features intersecting the district polygon.
gensD7 = gens.query(geometry_filter=filters.intersects(geomObj))
print(len(gensD7), type(gensD7), type(gens))
print(gensD7.features[0])

# Wrap the query result as a FeatureCollection so it can be added to a map.
gensD7_Layer = arcgis.features.FeatureCollection(gensD7.to_dict())
print(type(gensD7_Layer))

# In[53]:

# Anonymous GIS connection; map centred at the hard-coded lat/long, zoom 9.
my_gis = GIS()
map1 = my_gis.map()
map1.center = [34.2, -118.5]
map1.zoom = 9

# Draw the filtered features using the source layer's original renderer.
map1.add_layer(gensD7, origRendInfo['renderer'])
#map1.add_layer(dist7)
Пример #4
0
            # Round Lat/Long to 6 decimal places (~0.1 m precision).
            lat = round(lat, 6)
            long = round(long, 6)

            # Convert Lat/Long to USNG via MGRS, then re-space the raw
            # string as "GZD 100km-ID easting northing" for readability.
            m = mgrs.MGRS()
            usng_raw = m.toMGRS(lat, long)
            u = str(usng_raw.decode('utf-8'))
            usng = u[0:3] + ' ' + u[3:5] + ' ' + u[5:10] + ' ' + u[10:15]

            # Construct point feature.
            # NOTE(review): x/y come from earlier in the (truncated) scope —
            # presumably the geocode result; confirm they agree with lat/long.
            geocode_xy = Point({'x': x, 'y': y})

            # Feature layer query to find box alarm areas intersecting the point.
            fset_boxalarmareas = fl_boxalarmareas.query(
                geometry_filter=filters.intersects(geocode_xy))

            # Initialise box alarm variables; filled in by the loop below,
            # left as None when no area of that type contains the point.
            boxalarm_fire = None
            boxalarm_medical = None
            boxalarm_wildland = None

            # Loop to populate Box Alarm Variables
            for boxalarmarea in fset_boxalarmareas:
                if boxalarmarea.attributes['BoxAlarmType'] == 'FIRE':
                    boxalarm_fire = boxalarmarea.attributes['BoxAlarmNumber']
                elif boxalarmarea.attributes['BoxAlarmType'] == 'MEDICAL':
                    boxalarm_medical = boxalarmarea.attributes[
                        'BoxAlarmNumber']
                elif boxalarmarea.attributes['BoxAlarmType'] == 'WILDLAND':
                    boxalarm_wildland = boxalarmarea.attributes[
Пример #5
0
}, {
    "statisticType": "sum",
    "onStatisticField": "E_NOVEH",
    "outStatisticFieldName": "SUM_NOVEH"
}]
# Clear all existing records from the target layer before reloading.
data.delete_features(where='1=1')
# For each moderate-weather polygon: reproject it (Web Mercator 102100 ->
# NAD83 4269), pull the summed SVI statistics for the intersecting area,
# then store the statistics together with the projected geometry in `data`.
for feature in moderate.features:
    weather_geom = feature.geometry
    weather = [weather_geom]
    weather_proj = project(geometries=weather,
                           in_sr=102100,
                           out_sr=4269,
                           transform_forward=True)
    svi_weather_selection = svi.query(where="1=1",
                                      out_statistics=stats,
                                      geometry_filter=intersects(
                                          weather_proj[0]))
    statistics = svi_weather_selection.features[0]
    # Splice the projected geometry into the feature JSON by string surgery:
    # force the geometry string to start with '{"rings"' (replacing its first
    # 8 characters) and append it as a "geometry" member before re-parsing.
    # NOTE(review): fragile — depends on the exact str() form of both
    # objects; a dict-based merge would be safer.
    proj = str(weather_proj[0])
    proj = '{"rings"' + proj[8:]
    statistics = str(statistics)
    statistics = statistics[0:-1]
    statistics = "" + statistics + ', "geometry": ' + proj + '}'
    statistics = json.loads(statistics)
    data.edit_features(adds=[statistics])
for feature in severe.features:
    weather_geom = feature.geometry
    weather = [weather_geom]
    weather_proj = project(geometries=weather,
                           in_sr=102100,
                           out_sr=4269,
                           transform_forward=True)
Пример #6
0
# Process data
# Sum of erradicated hectares per municipality.
# NOTE(review): `errad_summarize` is not used in the visible portion of the
# script — confirm it is consumed further down or remove it.
errad_summarize = erradicacion_df.groupby(['nombremunicipio'
                                           ])['hectareas'].sum()

# Accumulator for the attribute updates built in the loop below.
updates = []
for row in erradicacion_lyr.query(where="nombremunicipio = '-'"):

    # get attributes
    oid = row.attributes['OBJECTID']
    x = row.geometry["x"]
    y = row.geometry["y"]

    # Create spatial filter
    point = Point({"x": x, "y": y})
    sr = SpatialReference({"wkid": 4326})
    geom_filter = filters.intersects(point, sr)

    # Query data using a point (Identify)
    mpio = mpio_layer.query(
        out_fields="coddane, nombremunicipio, nombredepartamento",
        geometry_filter=geom_filter,
        return_geometry=False)

    # Create update record
    nombre_mpio = mpio.features[0].attributes["nombremunicipio"]
    nombre_depto = mpio.features[0].attributes["nombredepartamento"]
    coddane = mpio.features[0].attributes["coddane"]
    update = {
        "attributes": {
            "objectid": oid,
            "NombreMunicipio": nombre_mpio,