import pandas as pd
from time import sleep

from arcgis.features import FeatureLayer
from arcgis.geometry import Point
from arcgis.geometry.filters import intersects


# send_slack_message is assumed to be a project-level helper defined elsewhere.
def row_in_feature_layer(row: pd.Series, feature_layer: FeatureLayer) -> bool:
    # Null check
    if pd.isna(row['pin_longitude']) or pd.isna(row['pin_latitude']):
        return False
    # Construct a point at the row's coordinates
    pin = Point({"x": row['pin_longitude'], "y": row['pin_latitude']})
    # construct a geometry filter to check if each point is in a disputed area
    pin_filter = intersects(pin)

    continue_query = True
    retries = 0
    MAX_RETRIES = 9
    # Default to setting in_disputed_area = True to ensure we never show pins in disputed area
    in_disputed_area = True
    # Make query to determine whether or not the pin is in the disputed area
    # If the query times out, retry with exponential backoff
    while continue_query:
        try:
            in_disputed_area = len(feature_layer.query(geometry_filter=pin_filter).features) > 0
            continue_query = False
        except Exception as e:
            # send slack message if we exceed retry count
            if retries > MAX_RETRIES:
                body = f'Unable to check if the record with ID {row["source_id"]} is in a disputed region.'
                send_slack_message(body, channel='#dev-logging-etl')
                continue_query = False
            else:
                sleep(1.5**(retries))
                retries += 1

    return in_disputed_area
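
A minimal usage sketch for row_in_feature_layer; the layer URL and the DataFrame contents below are illustrative placeholders, not values from the original source:

disputed_layer = FeatureLayer(
    "https://services.arcgis.com/example/arcgis/rest/services/DisputedAreas/FeatureServer/0")
pins = pd.DataFrame([
    {"source_id": "a1", "pin_longitude": 13.40, "pin_latitude": 52.52},
    {"source_id": "a2", "pin_longitude": None, "pin_latitude": None},
])
# Flag each pin and drop the ones that fall inside the disputed-areas layer.
pins["in_disputed_area"] = pins.apply(
    row_in_feature_layer, axis=1, feature_layer=disputed_layer)
pins = pins[~pins["in_disputed_area"]]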
Example #2
def delete_rows(service_url, where_clause):
    deletes = []
    lyr = FeatureLayer(service_url)
    query_result = lyr.query(where=where_clause, return_ids_only=True)
    deletes = query_result['objectIds']

    if deletes:
        return lyr.edit_features(deletes=str(deletes))
    else:
        return None
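
A quick usage sketch for delete_rows; the service URL and where clause are placeholders rather than values from the original source:

result = delete_rows(
    "https://services.arcgis.com/example/arcgis/rest/services/Assets/FeatureServer/0",
    "Status = 'Retired'",
)
if result is not None:
    print(result["deleteResults"])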
Example #3
  def run(self):
    layer = FeatureLayer("https://services-eu1.arcgis.com/CZ1GXX3MIjSRSHoC/ArcGIS/rest/services/Covid19_Impfmeldungen_%c3%96ffentlich/FeatureServer/0")

    start = time.time()
    data = layer.query(order_by_fields='Einrichtung, Meldedatum, Impfungen_proTyp')
    print('> Queried data in %.1fs' % (time.time() - start))

    if len(data) == 0:
      raise Exception('Queried data is empty')

    features_filtered = list(filter_duplicate_days(data.features))
    apply_manual_fixes(features_filtered)
    check_cumulative_plausability(features_filtered)

    rows_nach_einrichtung = list(map(map_nach_einrichtung, features_filtered))
    rows_nach_einrichtung_file = os.path.join('corona-impfungen', 'arcgisImpfungenNachEinrichtung.csv')
    rows_nach_einrichtung_diff = self.get_csv_diff(rows_nach_einrichtung_file, rows_nach_einrichtung)
    if len(rows_nach_einrichtung_diff) > 0:
      self.write_csv_rows(rows_nach_einrichtung_file, rows_nach_einrichtung)

      if self.telegram_bot is not None and self.telegram_chat_id is not None:
        data = ''.join(rows_nach_einrichtung_diff)
        self.telegram_bot.send_message(
          self.telegram_chat_id,
          '```\n' + (data[:4080] if len(data) > 4080 else data) + '```',
          parse_mode = "Markdown"
        )

    rows_nach_geschlecht = list(itertools.chain.from_iterable(map(map_nach_geschlecht, features_filtered)))
    rows_nach_geschlecht_file = os.path.join('corona-impfungen', 'arcgisImpfungenNachGeschlecht.csv')
    rows_nach_geschlecht_diff = self.get_csv_diff(rows_nach_geschlecht_file, rows_nach_geschlecht)
    if len(rows_nach_geschlecht_diff) > 0:
      self.write_csv_rows(rows_nach_geschlecht_file, rows_nach_geschlecht)

      if self.telegram_bot is not None and self.telegram_chat_id is not None:
        data = ''.join(rows_nach_geschlecht_diff)
        self.telegram_bot.send_message(
          self.telegram_chat_id,
          '```\n' + (data[:4080] if len(data) > 4080 else data) + '```',
          parse_mode = "Markdown"
        )

    rows_nach_alter = list(itertools.chain.from_iterable(map(map_nach_alter, features_filtered)))
    rows_nach_alter_file = os.path.join('corona-impfungen', 'arcgisImpfungenNachAlter.csv')
    rows_nach_alter_diff = self.get_csv_diff(rows_nach_alter_file, rows_nach_alter)
    if len(rows_nach_alter_diff) > 0:
      self.write_csv_rows(rows_nach_alter_file, rows_nach_alter)

      if self.telegram_bot is not None and self.telegram_chat_id is not None:
        data = ''.join(rows_nach_alter_diff)
        self.telegram_bot.send_message(
          self.telegram_chat_id,
          '```\n' + (data[:4080] if len(data) > 4080 else data) + '```',
          parse_mode = "Markdown"
        )
Example #4
    def run(self):
        csv_filename = os.path.join('corona-fallzahlen',
                                    'arcgisInzidenzGemeinden.csv')
        current_rows = self.read_csv_rows(csv_filename)

        layer = FeatureLayer(
            "https://services-eu1.arcgis.com/CZ1GXX3MIjSRSHoC/ArcGIS/rest/services/EBE_Gemeinden_Inzidenztabelle_3/FeatureServer/0"
        )

        start = time.time()
        data = layer.query(order_by_fields='Ort, Datum_Meldung')
        print('> Queried data in %.1fs' % (time.time() - start))

        if len(data) == 0:
            raise Exception('Queried data is empty')

        if len(data) < len(current_rows) * (1 / 1.5):
            raise Exception(
                'Queried data has far fewer items (%d) than current data (%d)'
                % (len(data), len(current_rows)))

        if len(data) > len(current_rows) * 1.5:
            raise Exception(
                'Queried data has far more items (%d) than current data (%d)'
                % (len(data), len(current_rows)))

        rows = list(
            map(
                lambda x: {
                    'datum':
                    datetime.utcfromtimestamp(x.attributes['Datum_Meldung'] /
                                              1000).strftime('%Y-%m-%d'),
                    'ort':
                    x.attributes['Ort'],
                    'neuPositiv':
                    str(x.attributes['positiv_neu']),
                    'inzidenz7tage':
                    str(round(x.attributes['inzidenz_letzte7Tage'], 2)),
                }, data.features))

        csv_diff = self.get_csv_diff(csv_filename, rows)

        if len(csv_diff) == 0:
            return

        if self.telegram_bot is not None and self.telegram_chat_id is not None:
            data = ''.join(csv_diff)
            self.telegram_bot.send_message(
                self.telegram_chat_id,
                '```\n' + (data[:4080] if len(data) > 4080 else data) + '```',
                parse_mode="Markdown")

        self.write_csv_rows(csv_filename, rows)
def main(arguments):
    # initialize logger
    logger = initialize_logging(arguments.log_file)
    # Create the GIS
    logger.info("Authenticating...")
    # First step is to get authenticate and get a valid token
    gis = GIS(arguments.org_url,
              username=arguments.username,
              password=arguments.password,
              verify_cert=not arguments.skip_ssl_verification)

    # Get the feature layer
    if gis.content.get(arguments.item_id):
        logger.info("Getting feature layer")
        item = gis.content.get(arguments.item_id)
        mirror_layer = item.layers[0]
        if arguments.lkl_layer_url:
            lkl_layer = FeatureLayer(url=arguments.lkl_layer_url)
        else:
            logger.info("Please pass an LKL layer url!")
            sys.exit(0)

        # Query LKL and mirror layer
        lkl_fset = lkl_layer.query('1=1', out_sr=3857)
        if len(lkl_fset) == 0:
            logger.info("No LKLs in your layer yet!")
            sys.exit(0)
        mirror_fset = mirror_layer.query('1=1', out_sr=3857)

        add_features = []
        update_features = []
        logger.info("Iterating through current LKL data")
        for feature in lkl_fset:
            for mirror_feature in mirror_fset:
                # use "in" instead of == comparison due to the potential for brackets to be in the GUID field
                if mirror_feature.attributes[return_field_name(
                        mirror_layer, "global_id")].lower(
                        ) in feature.attributes["globalid"].lower():
                    update_features.append(feature)
                    break
            else:
                add_features.append(feature)

        logger.info("Posting updated data to mirrored layer")
        mirror_layer.edit_features(adds=add_features,
                                   updates=update_features,
                                   use_global_ids=True)
        logger.info("Completed!")
    else:
        logger.info("Item not found")
def main(arguments):
    # initialize logger
    logger = initialize_logging(arguments.log_file)
    # Create the GIS
    logger.info("Authenticating...")
    # First step is to get authenticate and get a valid token
    gis = GIS(arguments.org_url,
              username=arguments.username,
              password=arguments.password,
              verify_cert=not arguments.skip_ssl_verification)
    if not gis.properties.isPortal:
        logger.error("This script only works with ArcGIS Enterprise")
        sys.exit(0)

    logger.info("Getting location tracking service")
    try:
        tracks_layer = gis.admin.location_tracking.tracks_layer
    except Exception as e:
        logger.info(e)
        logger.info(
            "Getting location tracking service failed - check that you are an admin and that location tracking is enabled for your organization"
        )
        sys.exit(0)

    logger.info("Getting polygon layer")
    try:
        layer = FeatureLayer(url=arguments.layer_url, gis=gis)
        _ = layer._lyr_json
    except Exception as e:
        logger.info(e)
        logger.info(
            "Layer could not be found based on given input. Please check your parameters again. Exiting the script"
        )
        sys.exit(0)

    features = layer.query(where=arguments.where, out_sr=3857).features
    if len(features) > 0:
        geometries = [feature.geometry for feature in features]
        logger.info("Unifying geometry data")
        union_geometry = geometry.union(spatial_ref=3857,
                                        geometries=geometries,
                                        gis=gis)
        if arguments.symmetric_difference:
            union_geometry['rings'] = form_donut(union_geometry['rings'])
        intersect_filter = geometry.filters.intersects(union_geometry, sr=3857)
        logger.info("Querying features")
        x = tracks_layer.delete_features(geometry_filter=intersect_filter)
        logger.info("Deleting features")
        logger.info("Deleted: " + str(len(x['deleteResults'])) + " tracks")
        logger.info("Completed!")
Example #7
def get_features_from_feature_server(url, query):
    """
    Given a url to a Feature Server, return a list
    of Features (for example, parking lots that are not full)

    :param url: url for Feature Server
    :param query: query to select features
                  example: {'where': '1=1', 'out_sr': '4326'}
    :return: list of all features returned from the query
    """
    features = []
    f = FeatureLayer(url=url)
    feature_set = f.query(**query)
    for feature in feature_set:
        features.append(feature.as_dict)
    return features
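
A usage sketch for get_features_from_feature_server, following the query shape shown in the docstring; the URL is a placeholder:

lots = get_features_from_feature_server(
    "https://services.arcgis.com/example/arcgis/rest/services/ParkingLots/FeatureServer/0",
    {"where": "Spaces > 0", "out_sr": "4326"},
)
for lot in lots:
    print(lot["attributes"])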
Example #8
def get_features_from_feature_server(url, query):
    """
    Given a url to a City of Boston Feature Server, return a list
    of Features (for example, parking lots that are not full)

    :param url: url for Feature Server
    :param query: a JSON object (example: { 'where': '1=1', 'out_sr': '4326' })
    :return: list of all features returned from the query
    """

    logger.debug('url received: ' + url + ', query received: ' + str(query))

    features = []
    f = FeatureLayer(url=url)
    feature_set = f.query(**query)
    for feature in feature_set:
        features.append(feature.as_dict)
    return features
Example #9
def get_features_from_feature_server(url, query):
    """
    Given a url to a City of Boston Feature Server, return a list
    of Features (for example, parking lots that are not full)
    
    :param url: url for Feature Server
    :param query: query to select features (example: "Spaces > 0")
    :return: list of all features returned from the query
    """

    logger.debug('url received: ' + url + ', query received: ' + query)

    features = []
    f = FeatureLayer(url = url)
    feature_set = f.query(where = query)
    for feature in feature_set:
        features.append(feature.as_dict)
    return features
    def run(self):
        csv_filename = os.path.join('corona-impfungen', 'arcgisImpfungen.csv')
        current_rows = self.read_csv_rows(csv_filename)

        layer = FeatureLayer(
            "https://services-eu1.arcgis.com/CZ1GXX3MIjSRSHoC/ArcGIS/rest/services/EBE_Gesamtsummen_Impfmeldungen_Öffentlich/FeatureServer/0"
        )

        start = time.time()
        data = layer.query(order_by_fields='Meldedatum')
        print('> Queried data in %.1fs' % (time.time() - start))

        if len(data) == 0:
            raise Exception('Queried data is empty')

        if len(data) < len(current_rows) * (1 / 1.5):
            raise Exception(
                'Queried data has far fewer items (%d) than current data (%d)'
                % (len(data), len(current_rows)))

        if len(data) > len(current_rows) * 1.5:
            raise Exception(
                'Queried data has far more items (%d) than current data (%d)'
                % (len(data), len(current_rows)))

        rows = list(map(feature_to_row, data.features))

        csv_diff = self.get_csv_diff(csv_filename, rows)

        if len(csv_diff) == 0:
            return

        if self.telegram_bot is not None and self.telegram_chat_id is not None:
            data = ''.join(csv_diff)
            self.telegram_bot.send_message(
                self.telegram_chat_id,
                '```\n' + (data[:4080] if len(data) > 4080 else data) + '```',
                parse_mode="Markdown")

        self.write_csv_rows(csv_filename, rows)
def temporal_currency(gis, df_current, output_features, grid_filter, geom,
                      in_fields):
    """ main driver of program """
    try:

        out_fl = FeatureLayer(gis=gis, url=output_features)
        out_sdf = out_fl.query(geometry_filter=grid_filter,
                               return_geometry=True,
                               return_all_records=True).df

        ##---cut stuff above-----
        sq = df_current['SHAPE'].disjoint(geom) == False
        df_current = df_current[sq].copy()
        if len(df_current) > 0:
            dates = df_current[in_fields].tolist()
            count = len(dates)
            date_list_strings = [d for d in dates]
            date_list = [get_datetime(d) for d in dates]
            year_list = [int(x.year) for x in date_list]
            dom_year, dom_year_count = Counter(year_list).most_common()[0]
            dom_date, dom_date_count = Counter(
                get_datetime_string(date_list)).most_common()[0]
            count_picket_fences = sum(
                non_std == datetime.datetime(1902, 1, 1, 0, 0)
                for non_std in date_list)
            count_non_std_dates = sum(
                non_std == datetime.datetime(1901, 1, 1, 0, 0)
                for non_std in date_list) + count_picket_fences
            date_list_minus = [
                x for x in date_list
                if (x != datetime.datetime(1901, 1, 1, 0, 0)
                    and x != datetime.datetime(1902, 1, 1, 0, 0))
            ]
            if len(date_list_minus) > 0:
                if dom_date == '1902-1-1' or dom_date == '1902-01-01':
                    dom_date = non_std_date
                    dom_year = non_std_year
                    score = 6
                    oldest = min(get_datetime_string(date_list_minus))
                    newest = max(get_datetime_string(date_list_minus))
                    change_list = [diff_date(dd) for dd in date_list_minus]
                    count_2year = sum(x <= 2 for x in change_list)
                    count_5year = sum((x <= 5 and x > 2) for x in change_list)
                    count_10year = sum(
                        (x <= 10 and x > 5) for x in change_list)
                    count_15year = sum(
                        (x <= 15 and x > 10) for x in change_list)
                    count_15year_plus = sum(x >= 15 for x in change_list)
                elif dom_date == '1901-1-1' or dom_date == '1901-01-01':
                    dom_date = 'NoInformation'
                    dom_year = 0
                    score = 6
                    oldest = min(get_datetime_string(date_list_minus))
                    newest = max(get_datetime_string(date_list_minus))
                    change_list = [diff_date(dd) for dd in date_list_minus]
                    count_2year = sum(x <= 2 for x in change_list)
                    count_5year = sum((x <= 5 and x > 2) for x in change_list)
                    count_10year = sum(
                        (x <= 10 and x > 5) for x in change_list)
                    count_15year = sum(
                        (x <= 15 and x > 10) for x in change_list)
                    count_15year_plus = sum(x >= 15 for x in change_list)
                else:
                    dom_date = dom_date
                    dom_year = dom_year
                    oldest = min(get_datetime_string(date_list_minus))
                    newest = max(get_datetime_string(date_list_minus))
                    change_list = [diff_date(dd) for dd in date_list_minus]
                    count_2year = sum(x <= 2 for x in change_list)
                    count_5year = sum((x <= 5 and x > 2) for x in change_list)
                    count_10year = sum(
                        (x <= 10 and x > 5) for x in change_list)
                    count_15year = sum(
                        (x <= 15 and x > 10) for x in change_list)
                    count_15year_plus = sum(x >= 15 for x in change_list)
                    score = get_currency_score(dom_year)
            else:
                if dom_date == '1902-01-01':
                    dom_date = non_std_date
                    dom_year = non_std_year
                    oldest = non_std_date
                    newest = non_std_date
                    change_list = 0
                    count_2year = 0
                    count_5year = 0
                    count_10year = 0
                    count_15year = 0
                    count_15year_plus = 0
                    score = 6
                else:
                    dom_date = 'NoInformation'
                    dom_year = 0
                    oldest = 'NoInformation'
                    newest = 'NoInformation'
                    change_list = 0
                    count_2year = 0
                    count_5year = 0
                    count_10year = 0
                    count_15year = 0
                    count_15year_plus = 0
                    score = 6

            out_sdf[FIELDS[0]][0] = dom_date
            out_sdf[FIELDS[1]][0] = dom_date_count
            out_sdf[FIELDS[2]][0] = round(dom_date_count * 100.0 / count, 1)
            out_sdf[FIELDS[3]][0] = dom_year
            out_sdf[FIELDS[4]][0] = dom_year_count
            out_sdf[FIELDS[5]][0] = round(dom_year_count * 100.0 / count, 1)
            out_sdf[FIELDS[6]][0] = oldest
            out_sdf[FIELDS[7]][0] = newest
            out_sdf[FIELDS[8]][0] = count_non_std_dates
            out_sdf[FIELDS[9]][0] = round(
                float(count_non_std_dates) * 100.0 / count, 1)
            out_sdf[FIELDS[10]][0] = round(
                float(count_2year) * 100.0 / count, 1)
            out_sdf[FIELDS[11]][0] = round(
                float(count_5year) * 100.0 / count, 1)
            out_sdf[FIELDS[12]][0] = round(
                float(count_10year) * 100.0 / count, 1)
            out_sdf[FIELDS[13]][0] = round(
                float(count_15year) * 100.0 / count, 1)
            out_sdf[FIELDS[14]][0] = round(
                float(count_15year_plus) * 100.0 / count, 1)
            out_sdf[FIELDS[15]][0] = int(count)
            out_sdf[FIELDS[16]][0] = int(score)

        else:
            out_sdf[FIELDS[0]][0] = "None"
            out_sdf[FIELDS[1]][0] = 0
            out_sdf[FIELDS[2]][0] = 0
            out_sdf[FIELDS[3]][0] = 0
            out_sdf[FIELDS[4]][0] = 0
            out_sdf[FIELDS[5]][0] = 0
            out_sdf[FIELDS[6]][0] = "None"
            out_sdf[FIELDS[7]][0] = "None"
            out_sdf[FIELDS[8]][0] = 0
            out_sdf[FIELDS[9]][0] = 0
            out_sdf[FIELDS[10]][0] = 0
            out_sdf[FIELDS[11]][0] = 0
            out_sdf[FIELDS[12]][0] = 0
            out_sdf[FIELDS[13]][0] = 0
            out_sdf[FIELDS[14]][0] = 0
            out_sdf[FIELDS[15]][0] = 0
            out_sdf[FIELDS[16]][0] = 0

        return out_sdf, out_fl

##        out_sdf_as_featureset = out_sdf.to_featureset()
##        print(out_sdf_as_featureset)
##        out_fl.edit_features(updates=out_sdf_as_featureset)
##
##        del df_current
##        del ext
##        del geom

    except FunctionError as f_e:
        messages = f_e.args[0]
##        arcpy.AddError("error in function: %s" % messages["function"])
##        arcpy.AddError("error on line: %s" % messages["line"])
##        arcpy.AddError("error in file name: %s" % messages["filename"])
##        arcpy.AddError("with error message: %s" % messages["synerror"])
##        arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
    except:
        line, filename, synerror = trace()
def source_lineage_by_grids(gis,
                            input_features,
                            output_features,
                            search_field,
                            value_field,
                            search_val=1001):
    try:

        out_fl = FeatureLayer(gis=gis, url=output_features)
        out_sdf = out_fl.query(return_geometry=True,
                               return_all_records=True).df

        print(out_sdf)

        sr = {'wkid': 4326}
        sp_rel = "esriSpatialRelIntersects"

        for idx, row in enumerate(out_sdf.iterrows()):
            print(idx)
            geom = row[1].SHAPE

            sp_filter = filters._filter(geom, sr, sp_rel)

            data_fl = FeatureLayer(url=input_features)
            #out_fields=in_fields,
            df_sub = data_fl.query(geometry_filter=sp_filter,
                                   return_geometry=True,
                                   return_all_records=False,
                                   out_fields=",".join(
                                       [search_field, value_field])).df

            if len(df_sub) > 0:

                #print(df_sub.head())

                #df_sub = df_current.loc[df_current.disjoint(geom) == False].copy()
                #df_sub = df = df_sub.loc[df_sub[search_field] == search_val].copy()

                df_sub = df_sub.replace({np.nan: "NULL"})

                grp = df_sub.groupby(by=value_field).size()  # Get the counts.
                #print(grp)

                #print(df_sub.head())

                # sort the values to get the biggest on the top
                #pandas 0.18
                try:
                    grp.sort_values(axis=0,
                                    ascending=False,
                                    inplace=True,
                                    kind='quicksort',
                                    na_position='last')
                #pandas 0.16
                except:
                    grp.sort(axis=0,
                             ascending=False,
                             inplace=True,
                             kind='quicksort',
                             na_position='last')

                #test = df_sub[value_field].unique().tolist()
                #print(",".join(test))

                if len(grp) > 1:
                    grp = grp.head(2)
                    out_sdf.set_value(
                        idx, FIELDS[0], ",".join(
                            filter(None,
                                   df_sub[value_field].unique().tolist())))
                    out_sdf.set_value(idx, FIELDS[1], grp.index[0])
                    out_sdf.set_value(idx, FIELDS[2], int(grp[0]))
                    out_sdf.set_value(
                        idx, FIELDS[3],
                        float(grp[0]) * 100.0 / float(len(df_sub)))
                    out_sdf.set_value(idx, FIELDS[4], grp.index[1])
                    out_sdf.set_value(idx, FIELDS[5], int(grp[1]))
                    out_sdf.set_value(
                        idx, FIELDS[6],
                        float(grp[1]) * 100.0 / float(len(df_sub)))

                elif len(grp) == 0:
                    out_sdf.set_value(idx, FIELDS[0], 'None')
                    out_sdf.set_value(idx, FIELDS[1], 'None')
                    out_sdf.set_value(idx, FIELDS[2], 0)
                    out_sdf.set_value(idx, FIELDS[3], float(0))
                    out_sdf.set_value(idx, FIELDS[4], 'None')
                    out_sdf.set_value(idx, FIELDS[5], 0)
                    out_sdf.set_value(idx, FIELDS[6], float(0))

                elif len(grp) == 1:
                    out_sdf.set_value(
                        idx, FIELDS[0], ",".join(
                            filter(None,
                                   df_sub[value_field].unique().tolist())))
                    out_sdf.set_value(idx, FIELDS[1], grp.index[0])
                    out_sdf.set_value(idx, FIELDS[2], int(grp[0]))
                    out_sdf.set_value(
                        idx, FIELDS[3],
                        float(grp[0]) * 100.0 / float(len(df_sub)))
                    out_sdf.set_value(idx, FIELDS[4], 'None')
                    out_sdf.set_value(idx, FIELDS[5], 0)
                    out_sdf.set_value(idx, FIELDS[6], float(0))
            else:
                print("No Data")

        return out_sdf, out_fl

    except FunctionError as f_e:
        messages = f_e.args[0]
        print('EXCEPTION HIT')
def source_lineage(gis,
                   df_current,
                   output_features,
                   grid_filter,
                   geom,
                   search_field,
                   value_field,
                   search_val=1001):
    """ main driver of program """
    try:

        out_fl = FeatureLayer(gis=gis, url=output_features)
        out_sdf = out_fl.query(geometry_filter=grid_filter,
                               return_geometry=True,
                               return_all_records=True).df

        df_sub = df_current.loc[df_current.disjoint(geom) == False].copy()

        if search_field:
            df_sub = df_sub.loc[df_sub[search_field] == search_val].copy()

        df_sub = df_sub.replace({np.nan: "NULL"})

        grp = df_sub.groupby(by=value_field).size()  # Get the counts.
        # sort the values to get the biggest on the top
        #pandas 0.18
        try:
            grp.sort_values(axis=0,
                            ascending=False,
                            inplace=True,
                            kind='quicksort',
                            na_position='last')
        #pandas 0.16
        except:
            grp.sort(axis=0,
                     ascending=False,
                     inplace=True,
                     kind='quicksort',
                     na_position='last')

        if len(grp) > 1:
            grp = grp.head(2)
            out_sdf[FIELDS[0]][0] = ",".join(
                df_sub[value_field].unique().tolist())
            out_sdf[FIELDS[1]][0] = grp.index[0]
            out_sdf[FIELDS[2]][0] = int(grp[0])
            out_sdf[FIELDS[3]][0] = float(grp[0]) * 100.0 / float(len(df_sub))
            out_sdf[FIELDS[4]][0] = grp.index[1]
            out_sdf[FIELDS[5]][0] = int(grp[1])
            out_sdf[FIELDS[6]][0] = float(grp[1]) * 100.0 / float(len(df_sub))

        elif len(grp) == 0:
            out_sdf[FIELDS[0]][0] = 'None'
            out_sdf[FIELDS[1]][0] = 'None'
            out_sdf[FIELDS[2]][0] = 0
            out_sdf[FIELDS[3]][0] = float(0)
            out_sdf[FIELDS[4]][0] = 'None'
            out_sdf[FIELDS[5]][0] = 0
            out_sdf[FIELDS[6]][0] = float(0)

        elif len(grp) == 1:
            out_sdf[FIELDS[0]][0] = ",".join(
                df_sub[value_field].unique().tolist())
            out_sdf[FIELDS[1]][0] = grp.index[0]
            out_sdf[FIELDS[2]][0] = int(grp[0])
            out_sdf[FIELDS[3]][0] = float(grp[0]) * 100.0 / float(len(df_sub))
            out_sdf[FIELDS[4]][0] = 'None'
            out_sdf[FIELDS[5]][0] = 0
            out_sdf[FIELDS[6]][0] = float(0)

##        out_sdf_as_featureset = out_sdf.to_featureset()
##        print(out_sdf_as_featureset)
##        out_fl.edit_features(updates=out_sdf_as_featureset)

        return out_sdf, out_fl

    except FunctionError as f_e:
        messages = f_e.args[0]
        #log.error("error in function: %s" % messages["function"])
        #log.error("error on line: %s" % messages["line"])
        #log.error("error in file name: %s" % messages["filename"])
        #log.error("with error message: %s" % messages["synerror"])
        #log.error("ArcPy Error Message: %s" % messages["arc"])
    except:
        line, filename, synerror = trace()
Example #14
import datetime

from dateutil import parser
from dateutil.relativedelta import relativedelta
from pprint import pprint

from arcgis.gis import GIS
from arcgis.features import FeatureLayer

lyr_url = r'https://services9.arcgis.com/7eQuDPPaB6g9029K/arcgis/rest/services/COVID_19_Alerts_Editable/FeatureServer/0'
str_field = 'Dates'
date_field = 'DateValue'
profile = 'agol_graphc'
where_clause = '1=1'

gis = GIS(profile='agol_graphc')
layer = FeatureLayer(url=lyr_url)
fields = [str_field, date_field]
test_dt = datetime.datetime.now()
query_result = layer.query(out_fields=",".join(fields), where=where_clause)
updates = []

for row in query_result:
    date_str = row.attributes[str_field]
    date_val = parser.parse(date_str).replace(hour=12)
    if date_val > test_dt:
        date_val = date_val + relativedelta(years=-1)
    date_ms = date_val.timestamp() * 1000
    if row.attributes[date_field] != date_ms:
        row.attributes[date_field] = date_ms
        updates.append(row)

if updates:
    pprint(layer.edit_features(updates=updates))
else:
    pass  # nothing to update
Example #15
from arcgis.gis import GIS
from arcgis.features import FeatureLayer
import pandas as pd

portal = GIS("https://www.arcgis.com")

fs_url = "https://services7.arcgis.com/F4iYwOOvXLYhkPXF/ArcGIS/rest/services/USFS_Nantahala_Pisgah_Trails_updated/FeatureServer/1"
fs = FeatureLayer(fs_url)
fs

#for f in fs.properties.fields:
#    print(f['name'])

query1 = fs.query(where="Trail_Name='LINVILLE GORGE'").sdf
query1

query1.to_csv(r"C:\Users\yuri7100\Desktop\test\test.csv")

Example #16
def main(arguments):
	# Initialize logging
	logger = initialize_logging(arguments.log_file)

	# Create the GIS
	logger.info("Authenticating...")

	# First step is to get authenticate and get a valid token
	gis = GIS(arguments.org_url,
			  username=arguments.username,
			  password=arguments.password,
			  verify_cert=not arguments.skip_ssl_verification)

	logger.info("Getting workforce project")

	# Get the workforce project
	item = gis.content.get(arguments.project_id)
	try:
		project = workforce.Project(item)
	except Exception as e:
		logger.info(e)
		logger.info("Invalid project id")
		sys.exit(0)
	
	# Get Survey or Collector Feature Layer
	layer = None
	if arguments.survey_id and arguments.layer_url:
		logger.info("Please try again with either survey id or layer url provided, not both")
		sys.exit(0)
	elif arguments.survey_id:
		survey_item = gis.content.get(arguments.survey_id)
		if survey_item:
			layer = survey_item.layers[0]
	elif arguments.layer_url:
		layer = FeatureLayer(arguments.layer_url)
	else:
		logger.info("Please provide either a portal id for your survey feature layer or a feature service URL for your survey/collector layer")
		sys.exit(0)
		
	# Check if layer exists
	try:
		x = layer.properties
	except Exception as e:
		logger.info(e)
		logger.info("Layer could not be found based on given input. Please check your parameters again. Exiting the script")
		sys.exit(0)
	
	# Updating Assignments
	logger.info("Querying assignments")
	assignments = project.assignments.search()
	to_update = []
	for assignment in assignments:
		if assignment.work_order_id and (assignment.status == "unassigned" or assignment.status == "assigned" or assignment.status == "declined"):
			where = f"{arguments.field_name} = '{assignment.work_order_id}'"
			if layer.query(where=where, return_count_only=True) > 0:
				logger.info(f"Potential Assignment to Cancel: {str(assignment)} with OBJECTID {assignment.object_id}")
				if gis.properties["isPortal"]:
					portal_url = gis.properties['portalHostname']
					logger.info(f"Assignment Link: {portal_url}/apps/workforce/#/projects/{arguments.project_id}/dispatch/assignments/{assignment.object_id}")
				else:
					logger.info(f"Assignment Link: https://workforce.arcgis.com/projects/{arguments.project_id}/dispatch/assignments/{assignment.object_id}")
				if arguments.cancel_assignments:
					logger.info("Canceling assignment")
					assignment.update(status="canceled")
					to_update.append(assignment)
	if arguments.cancel_assignments:
		project.assignments.batch_update(to_update)
	logger.info("Completed!")
def positional_accuracy(gis, df_current, output_features, grid_filter, geom, value_field):
    """ main driver of program """
    try:
        PDVERSION = [int(v) for v in pd.__version__.split('.')]

        out_fl = FeatureLayer(gis=gis, url=output_features)
        out_sdf = out_fl.query(geometry_filter=grid_filter,return_geometry=True,
            return_all_records=True).df

        sq = df_current['SHAPE'].disjoint(geom) == False
        df_current = df_current[sq].copy()
        if len(df_current) > 0:
            df_notnull = df_current.loc[df_current[value_field].notnull() == True]
            if PDVERSION[1] <= 16:
                df_notnull = df_notnull.drop(value_field, axis=1).join(df_notnull[value_field].astype(float,raise_on_error=False)).copy()
            elif PDVERSION[1] > 16:
                df_notnull = df_notnull.drop(value_field, axis=1).join(df_notnull[value_field].apply(pd.to_numeric, errors='coerce')).copy()  # CHANGES NON NUMERIC ROWS to NaN
            df_notnull = df_notnull.loc[df_notnull[value_field].notnull() == True].copy() # Drops NaN values
            not_null_count = len(df_notnull)
            null_count = len(df_current) - not_null_count
            if PDVERSION[1] == 16:
                try:
                    s = df_notnull.loc[df_notnull[value_field] != 'No Information', value_field].copy().astype(np.float64)
                except:
                    s = df_notnull.loc[df_notnull[value_field].astype(str) != 'No Information', value_field].copy().astype(np.float64)
            elif PDVERSION[1] > 16:
                s = df_notnull.drop(value_field, axis=1).join(df_notnull[value_field].apply(pd.to_numeric, errors='coerce'))[value_field].copy() # Drops Text Fields
            s = s[s.notnull() == True].copy() # Drops NaN values
            mean = s.mean()
            median = s.median()
            mode = s.mode()
            if len(mode) > 0:
                mode = mode[0]
            else:
                mode = 0
            mmax = s.max()
            mmin = s.min()
            score = get_score(mean)
            null_percent = float(null_count) * 100.0 / float(len(df_current))

            if not pd.isnull(mean):
                out_sdf[FIELDS[0]][0]=round(mean,1)
            else:
                out_sdf[FIELDS[0]][0]=-1
            if not pd.isnull(median):
                out_sdf[FIELDS[1]][0]=median
            else:
                out_sdf[FIELDS[1]][0]=-1

            if not pd.isnull(mode):
                out_sdf[FIELDS[2]][0]=mode
            else:
                out_sdf[FIELDS[2]][0]=-1

            if not pd.isnull(mmin):
                out_sdf[FIELDS[3]][0]=mmin
            else:
                out_sdf[FIELDS[3]][0]=-1

            if not pd.isnull(mmax):
                out_sdf[FIELDS[4]][0]=mmax
            else:
                out_sdf[FIELDS[4]][0]=-1

            out_sdf[FIELDS[5]][0]=null_count
            out_sdf[FIELDS[6]][0]=round(null_percent,1)
            out_sdf[FIELDS[7]][0]=len(df_current)#not_null_count
            out_sdf[FIELDS[8]][0]=score
            out_sdf[FIELDS[9]][0]=get_tier(score)

            del df_notnull
            del mean
            del median
            del mode
            del mmax
            del mmin
            del score
            del null_percent
        else:
            out_sdf[FIELDS[0]][0]=-1
            out_sdf[FIELDS[1]][0]=-1
            out_sdf[FIELDS[2]][0]=-1
            out_sdf[FIELDS[3]][0]=-1
            out_sdf[FIELDS[4]][0]=-1
            out_sdf[FIELDS[5]][0]=0
            out_sdf[FIELDS[6]][0]=0
            out_sdf[FIELDS[7]][0]=0
            out_sdf[FIELDS[8]][0]=0
            out_sdf[FIELDS[9]][0]="No Ranking"
            #r = tuple([oid] + [-1]*5 + [0] * 4 + ["No Ranking"])

        return out_sdf, out_fl


    except FunctionError as f_e:
        messages = f_e.args[0]
        #arcpy.AddError("error in function: %s" % messages["function"])
        #arcpy.AddError("error on line: %s" % messages["line"])
        #arcpy.AddError("error in file name: %s" % messages["filename"])
        #arcpy.AddError("with error message: %s" % messages["synerror"])
        #arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
    except:
        line, filename, synerror = trace()
Example #18
class DailyCovid19TestingByPostcode(object):
    def __init__(self, service_url=default_service_url):
        self.layer = FeatureLayer(service_url)
        self.postcode_field = 'Postcode'
        self.date_field = 'Date'
        self.date_code_field = 'DateCode'
        self.tests_field = 'Tests'
        self.total_tests_field = 'TotalTests'
        self.date_code_format = '%Y%m%d'

    def query(self, where_clause):
        return self.layer.query(where=where_clause)

    def update(self, rows, allow_deletes=False):
        """
        updates the feature service by updating or adding the submitted rows.  Rows are identified by date_code and postcode.
        Date values are derived from the date_code values.
        Existing XYs are not updated, submitted xys are used if rows are added.
        :param rows: [{'postcode': str, 'date_code': str, 'tests': int, 'total_tests': int, 'xy': {'x': float, 'y': float}}]
        :type rows: [dict]
        :param allow_deletes: If set to True any records not found in the source rows will be deleted.  Default=False
        :type allow_deletes: bool
        :return:
        :rtype:
        """

        # build lookup from submitted rows:
        items = {}
        for row in rows:
            key = '{}_{}'.format(row['postcode'], row['date_code'])
            items[key] = row

        deletes = []
        updates = []
        query_result = self.layer.query()
        for row in query_result:
            poa = row.attributes[self.postcode_field]
            date_code = row.attributes[self.date_code_field]
            key = '{}_{}'.format(poa, date_code)
            if allow_deletes and key not in items:
                deletes.append(
                    row.attributes[query_result.object_id_field_name])

            item = items.pop(key, None)
            if item:
                update_required = False
                if row.attributes[self.tests_field] != item['tests']:
                    row.attributes[self.tests_field] = item['tests']
                    update_required = True
                if row.attributes[
                        self.total_tests_field] != item['total_tests']:
                    row.attributes[
                        self.total_tests_field] = item['total_tests']
                    update_required = True
                if update_required:
                    updates.append(row)

        # any remaining data_models items are new records
        adds = []
        new_items = items.values()
        if new_items:
            for new_item in new_items:
                date = datetime.datetime.strptime(new_item['date_code'],
                                                  '%Y%m%d')
                row = {
                    "attributes": {
                        self.date_field: date,
                        self.postcode_field: new_item['postcode'],
                        self.date_code_field: new_item['date_code'],
                        self.total_tests_field: new_item['total_tests'],
                        self.tests_field: new_item['tests']
                    },
                    "geometry": new_item['xy']
                }
                adds.append(row)

        logging.info('Updating: {}'.format(self.layer.url))
        logging.info('ADDS: {}'.format(len(adds)))
        logging.info('DELETES: {}'.format(len(deletes)))
        logging.info('UPDATES: {}'.format(len(updates)))
        if updates or adds or deletes:
            return self.layer.edit_features(updates=updates,
                                            adds=adds,
                                            deletes=str(deletes))
        else:
            return None

    def format_date_string(self, date_string, in_format):
        """
        Reformats the submitted date or datetime string to the correct format for the date_code field
        :type date_string: str
        :param in_format: the format string of the submitted date_string.  eg: '%Y-%m-%d'
        :type in_format: str
        :return: The reformatted date_string
        :rtype: str
        """

        if in_format == self.date_code_format:
            return date_string

        dt = datetime.datetime.strptime(date_string, in_format)
        return dt.strftime(self.date_code_format)
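
An illustrative sketch of driving DailyCovid19TestingByPostcode.update; the postcode, counts and coordinates below are made up, and default_service_url is assumed to be defined at module level as in the original class:

testing_layer = DailyCovid19TestingByPostcode()
rows = [{
    'postcode': '2000',
    'date_code': testing_layer.format_date_string('2020-04-01', '%Y-%m-%d'),
    'tests': 25,
    'total_tests': 480,
    'xy': {'x': 151.21, 'y': -33.87},
}]
result = testing_layer.update(rows, allow_deletes=False)
print(result)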
Example #19
import sys

import arcpy
from arcgis.gis import GIS
from arcgis.features import FeatureLayer
""" Local Variables """

url_gis = r"https://ladot.maps.arcgis.com"  # URL to AGOL or Portal
url_fl = r"https://services3.arcgis.com/vq5vGR4r1YX7ueLg/arcgis/rest/services/Speed_Hump_Layer/FeatureServer/0"  # URL for feature layer to download as feature class
user = arcpy.GetParameterAsText(1)  # AGOL or Portal username
pwd = arcpy.GetParameterAsText(2)  # user password
arcpy.env.workspace = arcpy.GetParameterAsText(0)  # path to file geodatabase
fc_copy = r"SpeedHumpCopy"  # name of copied feature class
fc = r"SpeedHump\SpeedHumpAnalysis"  # name of main speed hump feature class

try:
    """ Copy Layer from AGOL to GDB """

    gis = GIS(url_gis, user, pwd)
    fl = FeatureLayer(url_fl)
    fs = fl.query()
    fs.save(arcpy.env.workspace, fc_copy)
    """ Replace Features in Feature Class and Delete Copy """

    arcpy.DeleteRows_management(fc)
    arcpy.Append_management(fc_copy, fc, "NO_TEST")
    arcpy.Delete_management(fc_copy)
    arcpy.AddMessage("SUCCESS")

except Exception as err:
    """ Error Handling """

    arcpy.AddError(err)
    sys.exit(1)
Example #20
def completeness(gis, df_after, df_before, output_features, grid_filter, geom):
    """ main driver of program """
    try:

        out_fl = FeatureLayer(gis=gis, url=output_features)
        out_sdf = out_fl.query(geometry_filter=grid_filter,return_geometry=True,
                return_all_records=True).df

        geometry_type = df_after.geometry_type

        sq = df_before[df_before.geometry.notnull()].geometry.disjoint(geom) == False
        df_before = df_before[sq].copy()
        before_count = len(df_before)
        sq = df_after[df_after.geometry.notnull()].geometry.disjoint(geom) == False
        df_after = df_after[sq].copy()
        after_count = len(df_after)
        geoms_after = df_after.clip(geom.extent)
        geoms_before = df_before.clip(geom.extent)

        geoms_before_sdf = SpatialDataFrame(geometry=geoms_before)
        geoms_after_sdf = SpatialDataFrame(geometry=geoms_after)

        q_after = geoms_after_sdf.geometry.JSON == '{"paths":[]}'
        geoms_after_sdf = geoms_after_sdf[~q_after].copy()
        geoms_after_sdf.reset_index(inplace=True, drop=True)
        q_before = geoms_before_sdf.geometry.JSON == '{"paths":[]}'
        geoms_before_sdf = geoms_before_sdf[~q_before].copy()
        geoms_before_sdf.reset_index(inplace=True, drop=True)

        if geometry_type == "Polygon":
            before_val = geoms_before_sdf.geometry.get_area('GEODESIC','SQUAREKILOMETERS').sum()
            after_val = geoms_after_sdf.geometry.get_area('GEODESIC','SQUAREKILOMETERS').sum()
            if after_val > 0:
                score = get_score(ratio=before_val/after_val,
                        baseVal=before_val,
                        inputVal=after_val)
            else:
                score = get_score(0, before_val, after_val)

            out_sdf[FIELDS[0]][0] = round(before_val,1)
            out_sdf[FIELDS[1]][0] = round(after_val,1)
            out_sdf[FIELDS[3]][0] = round(before_val - after_val,1)
            out_sdf[FIELDS[2]][0] = score

        elif geometry_type == "Polyline":
            before_val = geoms_before_sdf.geometry.get_length('GEODESIC','KILOMETERS').sum()
            after_val = geoms_after_sdf.geometry.get_length('GEODESIC','KILOMETERS').sum()

            if after_val > 0:
                score = get_score(ratio=before_val/after_val,
                        baseVal=before_val,
                        inputVal=after_val)
            else:
                score = get_score(0, before_val, after_val)

            out_sdf[FIELDS[0]][0] = round(before_val,1)
            out_sdf[FIELDS[1]][0] = round(after_val,1)
            out_sdf[FIELDS[3]][0] = round(before_val - after_val,1)
            out_sdf[FIELDS[2]][0] = score

        else:
            before_count = len(geoms_before_sdf)
            after_count = len(geoms_after_sdf)
            if after_count > 0:
                score = get_score(ratio=before_count/after_count,
                        baseVal=before_count,
                        inputVal=after_count)
            else:
                score = get_score(ratio=0,
                        baseVal=before_count,
                        inputVal=after_count)

            out_sdf[FIELDS[0]][0] = before_count
            out_sdf[FIELDS[1]][0] = after_count
            out_sdf[FIELDS[3]][0] = before_count - after_count
            out_sdf[FIELDS[2]][0] = score

        del sq
        del df_after
        del df_before
        del geom

        return out_sdf, out_fl

        #arcpy.SetParameterAsText(4, out_grid)
    except FunctionError as f_e:
        messages = f_e.args[0]

    except:
        line, filename, synerror = trace()

#--------------------------------------------------------------------------
##if __name__ == "__main__":
##    #env.overwriteOutput = True
##    argv = tuple(arcpy.GetParameterAsText(i)
##    for i in range(arcpy.GetArgumentCount()))
##    main(*argv)
def main(event, context):

    # Cityworks settings
    global baseUrl
    baseUrl = event["cityworks"]["url"]
    cwUser = event["cityworks"]["username"]
    cwPwd = event["cityworks"]["password"]

    cw_services.url = baseUrl

    # ArcGIS Online/Portal settings
    orgUrl = event["arcgis"]["url"]
    username = event["arcgis"]["username"]
    password = event["arcgis"]["password"]
    layers = event["arcgis"]["layers"]
    tables = event["arcgis"]["tables"]
    layerfields = event["fields"]["layers"]
    tablefields = event["fields"]["tables"]
    fc_flag = event["flag"]["field"]
    flag_values = [event["flag"]["on"], event["flag"]["off"]]
    ids = event["fields"]["ids"]
    probtypes = event["fields"]["type"]

    if log_to_file:
        from datetime import datetime as dt
        id_log = path.join(sys.path[0], "cityworks_log.log")
        log = open(id_log, "a")
        log.write("\n\n{}\n".format(dt.now()))

    try:
        # Connect to org/portal
        gis = GIS(orgUrl, username, password)

        # Get token for CW
        auth_response = cw_services.authenticate(cwUser, cwPwd)
        if auth_response['Status'] != 0:
            raise Exception("Cityworks not authenticated")

        # get wkid
        sr = get_wkid()

        if sr == "error":
            if log_to_file:
                log.write("Spatial reference not defined\n")
            else:
                print("Spatial reference not defined\n")
            raise Exception("Spatial reference not defined")

        # get problem types
        prob_types = get_problem_types()

        if prob_types == "error":
            if log_to_file:
                log.write("Problem types not defined\n")
            else:
                print("Problem types not defined\n")
            raise Exception("Problem types not defined")

        for layer in layers:
            lyr = FeatureLayer(layer, gis=gis)
            oid_fld = lyr.properties.objectIdField

            # Get related table URL
            reltable = ""
            for relate in lyr.properties.relationships:
                url_pieces = layer.split("/")
                url_pieces[-1] = str(relate["relatedTableId"])
                table_url = "/".join(url_pieces)

                if table_url in tables:
                    reltable = table_url
                    break

            # query reports
            sql = "{}='{}'".format(fc_flag, flag_values[0])
            rows = lyr.query(where=sql, out_sr=sr)
            updated_rows = []

            for row in rows.features:
                oid = row.attributes[oid_fld]

                # Submit feature to the Cityworks database
                requestid = submit_to_cw(row, prob_types, layerfields, oid,
                                         probtypes)

                try:
                    if "WARNING" in requestid:
                        if log_to_file:
                            log.write(
                                "Warning generated while copying record to Cityworks: {}\n"
                                .format(requestid))
                        else:
                            print(
                                "Warning generated while copying record to Cityworks: {}\n"
                                .format(requestid))
                        continue
                    else:
                        pass  # requestID is str = ok
                except TypeError:
                    pass  # requestID is a number = ok

                # attachments
                attachmentmgr = AttachmentManager(lyr)
                attachments = attachmentmgr.get_list(oid)

                for attachment in attachments:
                    response = copy_attachment(attachmentmgr, attachment, oid,
                                               requestid)
                    if response["Status"] is not 0:
                        if log_to_file:
                            log.write(
                                "Error while copying attachment to Cityworks: {}\n"
                                .format(response["ErrorMessages"]))
                        else:
                            print(
                                "Error while copying attachment to Cityworks: {}\n"
                                .format(response["ErrorMessages"]))

                # update the record in the service so that it evaluates falsely against sql
                sql = "{}='{}'".format(oid_fld, oid)
                row_orig = lyr.query(where=sql).features[0]
                row_orig.attributes[fc_flag] = flag_values[1]
                try:
                    row_orig.attributes[ids[1]] = requestid
                except TypeError:
                    row_orig.attributes[ids[1]] = str(requestid)

                updated_rows.append(row_orig)

            # apply edits to updated features
            if updated_rows:
                status = lyr.edit_features(updates=updated_rows)
                if log_to_file:
                    log.write(
                        "Status of updates to ArcGIS layers: {}\n".format(
                            status))
                else:
                    print("Status of updates to ArcGIS layers: {}\n".format(
                        status))

            rel_records = []
            updated_rows = []
            # related records
            rellyr = FeatureLayer(reltable, gis=gis)
            relname = rellyr.properties['name']

            pkey_fld = lyr.properties.relationships[0]["keyField"]
            fkey_fld = rellyr.properties.relationships[0]["keyField"]
            sql = "{}='{}'".format(fc_flag, flag_values[0])
            rel_records = rellyr.query(where=sql)
            updated_rows = []

            for record in rel_records:
                rel_oid = record.attributes[oid_fld]
                parent = get_parent(lyr, pkey_fld, record, fkey_fld)

                # Upload comment attachments
                try:
                    attachmentmgr = rellyr.attachments
                    attachments = attachmentmgr.get_list(rel_oid)
                    for attachment in attachments:
                        response = copy_attachment(attachmentmgr, attachment,
                                                   rel_oid,
                                                   parent.attributes[ids[1]])
                        if response["Status"] is not 0:
                            try:
                                error = response["ErrorMessages"]
                            except KeyError:
                                error = response["Message"]
                            msg = "Error copying attachment. Record {} in table {}: {}".format(
                                rel_oid, relname, error)
                            if log_to_file:
                                log.write(msg + '\n')
                            else:
                                print(msg)
                except RuntimeError:
                    pass  # table doesn't support attachments

                # Process comments
                response = copy_comments(record, parent, tablefields, ids)

                if 'error' in response:
                    if log_to_file:
                        log.write('Error accessing comment table {}\n'.format(
                            relname))
                    else:
                        print(
                            'Error accessing comment table {}'.format(relname))
                    break

                elif response["Status"] is not 0:
                    try:
                        error = response["ErrorMessages"]
                    except KeyError:
                        error = response["Message"]
                    msg = "Error copying record {} from {}: {}".format(
                        rel_oid, relname, error)
                    if log_to_file:
                        log.write(msg + '\n')
                    else:
                        print(msg)
                    continue
                else:
                    record.attributes[fc_flag] = flag_values[1]
                    try:
                        record.attributes[ids[1]] = parent.attributes[ids[1]]
                    except TypeError:
                        record.attributes[ids[1]] = str(
                            parent.attributes[ids[1]])

                    updated_rows.append(record)

            # apply edits to updated records
            if updated_rows:
                status = rellyr.edit_features(updates=updated_rows)
                if log_to_file:
                    log.write(
                        "Status of updates to ArcGIS comments: {}\n".format(
                            status))
                else:
                    print("Status of updates to ArcGIS comments: {}\n".format(
                        status))

            print("Finished processing: {}".format(lyr.properties["name"]))

    except Exception as ex:
        print("error: " + str(ex))

    if log_to_file:
        log.close()
Example #22
#!/usr/bin/env python3

import sys

from arcgis.features import FeatureLayer

from vaccine_feed_ingest.utils.log import getLogger

logger = getLogger(__file__)

url = "https://dhsgis.wi.gov/server/rest/services/DHS_COVID19/COVID19_Vaccine_Provider_Sites/MapServer/0"

output_dir = sys.argv[1] if len(sys.argv) > 1 else None
if output_dir is None:
    logger.error("Must pass an output_dir as first argument")
    sys.exit(1)

layer = FeatureLayer(url)
results = layer.query(return_all_records=True)
results.save(output_dir, "wi_arcgis_map.json")
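
As a follow-up sketch, the saved JSON can be read back into a FeatureSet for further processing; the file name below simply mirrors the save call above:

import os
from arcgis.features import FeatureSet

with open(os.path.join(output_dir, "wi_arcgis_map.json")) as fh:
    provider_sites = FeatureSet.from_json(fh.read())
print(len(provider_sites.features), "provider sites downloaded")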
Example #23
class IndividualCasesByPostcodeFeatureLayer(object):
    def __init__(self,
                 service_url: str,
                 date_string_field='DateString',
                 postcode_field='PostCode',
                 report_date_field='ReportDate',
                 report_age_field='ReportAge',
                 oid_field='OBJECTID'):
        self.layer = FeatureLayer(service_url)
        self.oid_field = oid_field
        self.dateString_field = date_string_field
        self.postcode_field = postcode_field
        self.reportAge_field = report_age_field
        self.reportDate_field = report_date_field

    @staticmethod
    def _get_source_values(case_list,
                           postcode_centroids,
                           default_xy=(17100000, -4600000)):
        """
        Generates the update values by combining counts by date and postcode from the case list
        and postcode centroids from the geometry source.

        :param case_list: The CaseList containing the cases to be used to update the FeatureLayer
        :type case_list: CaseList
        :param postcode_centroids: A lookup of postcode centroid geometries
        :type postcode_centroids: dict
        :param default_xy: Optional - The xy tuple to be used if no matching postcode geometry is found.
        Default is (17100000, -4600000), a WGS84-WMAS coordinate that falls off the NSW/VIC coast.
        :type default_xy: tuple (float, float)
        :return: {yyyymmddpppp: {'date_string': str, 'date': datetime, 'postcode': int, 'report_age': int, 'count': int, 'xy': (float, float)}}
        :rtype: dict
        """

        result = case_list.counts_by_date_and_postcode()

        # add the coordinate values to each item.
        for item in result.values():
            postcode_centroid = postcode_centroids.get(item['postcode'], None)
            if postcode_centroid:
                item['xy'] = postcode_centroid
            else:
                item['xy'] = {'x': default_xy[0], 'y': default_xy[1]}

        return result

    def update(self,
               case_list,
               postcode_centroids,
               default_xy=(17100000, -4600000)):

        source_values = self._get_source_values(case_list, postcode_centroids,
                                                default_xy)

        # get featureset from target service
        query_result = self.layer.query()
        postcode_features = query_result.features

        adds = []
        deletes = []
        updates = []
        unchanged = 0

        for postcode_feature in postcode_features:
            postcode = postcode_feature.attributes[self.postcode_field]
            datestring = postcode_feature.attributes[self.dateString_field]
            key_value = datestring + postcode
            item = source_values.get(key_value, None)
            if item:
                if item['count'] == 1:
                    """ Remove the item from source if all source items have been matched.
                    Additional items in the feature layer with the same key will be deleted."""
                    source_values.pop(key_value)
                item['count'] -= 1
                if item['report_age'] != postcode_feature.attributes[
                        self.reportAge_field]:
                    """If the report age values are not equal, updates are required.
                    We do not need to compare postcode or date values because they are defined
                    at record creation and do not change. A source record where the postcode or date changes
                    will result in a deletion of the current record and a creation of a new record with the
                    new date/postcode key."""
                    postcode_feature.attributes[
                        self.reportAge_field] = item['report_age']
                    updates.append(postcode_feature)
                else:
                    unchanged += 1

            else:  # if no match found in source, add the OID to the deletes
                deletes.append(postcode_feature.attributes[self.oid_field])

        # any items left in the source_values represent new records.  Append them to the Adds list.
        for source_value in source_values.values():
            item = {
                "attributes": {
                    self.postcode_field: source_value['postcode'],
                    self.dateString_field: source_value['date_string'],
                    self.reportDate_field: source_value['date'],
                    self.reportAge_field: source_value['report_age']
                },
                "geometry": source_value['xy']
            }

            for i in range(source_value['count']):
                adds.append(item)

        if updates or adds or deletes:
            self.layer.edit_features(updates=updates,
                                     adds=adds,
                                     deletes=str(deletes))

        return UpdateResultCounts(adds=len(adds),
                                  deletes=len(deletes),
                                  updates=len(updates),
                                  unchanged=unchanged)
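A hedged usage sketch for the class above: the service URL and centroid values are placeholders, and case_list stands in for a populated CaseList like the one consumed by _get_source_values.

# Hypothetical wiring; the URL, case_list and centroid values are placeholders.
CASES_URL = "https://example.com/arcgis/rest/services/CasesByPostcode/FeatureServer/0"  # placeholder

postcode_centroids = {
    2000: {"x": 16832000.0, "y": -4009000.0},  # illustrative Web Mercator centroids
    2150: {"x": 16810000.0, "y": -4004000.0},
}

cases_layer = IndividualCasesByPostcodeFeatureLayer(CASES_URL)
result = cases_layer.update(case_list, postcode_centroids)  # returns UpdateResultCounts
print(result)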
Example #24
0
class TotalCasesByPostcodeFeatureLayer(object):
    def __init__(self,
                 service_url: str,
                 postcode_field: str = 'PostCode',
                 total_cases_field: str = 'TotalCases',
                 date_of_last_case_field: str = 'DateOfLastCase',
                 days_since_last_case_field: str = 'DaysSinceLastCase'):
        self.layer = FeatureLayer(service_url)
        self.postcode_field = postcode_field
        self.totalCases_field = total_cases_field
        self.dateOfLastCase_field = date_of_last_case_field
        self.daysSinceLastCase_field = days_since_last_case_field

    def _get_updates(self, postcode_features: FeatureSet, update_values: dict):
        """

        :param postcode_features: The postcodes featureset to be updated
        :type postcode_features: FeatureSet
        :param update_values: The current count values to be applied.
        :type update_values:
        :return:
        :rtype:
        """
        updates = []
        no_change = 0

        for postcode_feature in postcode_features:
            postcode = postcode_feature.attributes[self.postcode_field]
            updated_counts = update_values.pop(postcode, None)
            if updated_counts:
                total_cases = updated_counts['Total Cases']
                date_last = updated_counts['DateOfLastCase']
                days_since = updated_counts['DaysSinceLastCase']
            else:
                total_cases = None
                date_last = None
                days_since = None

            current_cases = postcode_feature.attributes[self.totalCases_field]
            current_days_since = postcode_feature.attributes[
                self.daysSinceLastCase_field]
            if current_cases != total_cases or days_since != current_days_since:
                postcode_feature.attributes[
                    self.totalCases_field] = total_cases
                postcode_feature.attributes[
                    self.dateOfLastCase_field] = date_last
                postcode_feature.attributes[
                    self.daysSinceLastCase_field] = days_since
                updates.append(postcode_feature)
            else:
                no_change += 1

        return updates, no_change

    def _get_adds(self, update_values):
        new_postcodes = []
        for new_item in update_values.values():
            # Create new item for service adds.  Explicitly assigning the values to a new item prevents any possible errors
            # if the item has additional fields.
            item = {
                "attributes": {
                    self.postcode_field: new_item['Postcode'],
                    self.totalCases_field: new_item['Total Cases'],
                    self.dateOfLastCase_field: new_item['DateOfLastCase'],
                    self.daysSinceLastCase_field: new_item['DaysSinceLastCase']
                },
                "geometry": {
                    "x": 17100000,
                    "y": -4600000
                }
            }

            new_postcodes.append(item)

        return new_postcodes

    def update_from_case_list(self, case_list: CaseList):
        """
        Updates the layer using the values in the case_list parameter.

        If a postcode in the service is not found in the case_list then the value for that postcode is set to None.
        Postcodes are never deleted from the service.

        If postcodes are found in the case_list that are not in the table, then those postcodes are added to the service.

        This function does not alter the case_list object.

        :param case_list: The case list to be used to update the feature layer.
        :type case_list: CaseList
        :return:
        :rtype: UpdateResultCounts
        """
        # make a local copy of postcode_counts so we don't inadvertently change the source dictionary for other uses.
        update_values = case_list.counts_by_postcode()

        # get featureset from target service
        query_result = self.layer.query()
        postcode_features = query_result.features

        updates, no_change = self._get_updates(postcode_features,
                                               update_values)
        adds = self._get_adds(update_values)

        if updates or adds:
            self.layer.edit_features(updates=updates, adds=adds)

        return UpdateResultCounts(adds=len(adds),
                                  updates=len(updates),
                                  unchanged=no_change)

    def get_geometry_lookups(self, where_clause: str = None):
        """
        Gets a dictionary of {key: geometry pairs}
        :param where_clause:
        :type where_clause:
        :return:
        :rtype:
        """
        result = {}
        if where_clause:
            query_result = self.layer.query(out_fields=[self.postcode_field],
                                            where=where_clause)
        else:
            query_result = self.layer.query(out_fields=[self.postcode_field])

        postcode_features = query_result.features
        for postcode_feature in postcode_features:
            postcode = postcode_feature.attributes[self.postcode_field]
            geometry = postcode_feature.geometry
            result[postcode] = geometry

        return result
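A hedged sketch of the totals workflow above: push counts from a CaseList into the totals layer, then reuse its postcode geometries as a centroid lookup elsewhere. The URL, case_list, and the where clause are placeholders.

TOTALS_URL = "https://example.com/arcgis/rest/services/TotalCasesByPostcode/FeatureServer/0"  # placeholder

totals_layer = TotalCasesByPostcodeFeatureLayer(TOTALS_URL)
totals_result = totals_layer.update_from_case_list(case_list)  # UpdateResultCounts
print(totals_result)

# The geometry lookup can serve as the centroid source for IndividualCasesByPostcodeFeatureLayer.update.
postcode_centroids = totals_layer.get_geometry_lookups(where_clause="TotalCases > 0")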
def thematic_accuracy(out_sdf, df_list, f_thm_acc, them_gis, them_url):

    print('Running Thematic Accuracy')

    f_thm_acc = validate_field(f_thm_acc, list(df_list[0]))

    # List Used for Logging Differences in Population Sources
    pop_diff = []

    for idx, row in enumerate(out_sdf.iterrows()):

        df_current = df_list[idx]

        ##-----------------------------------------------------------------------------
        ## Uses Geoenrichment - Not available outside of AGOL
        # Pull GeoEnrichment Figures
        # enriched = enrich([row[1]['SHAPE']], gis=geo_gis)
        # if 'TOTPOP' not in list(enriched):
        #     enriched_pop = -1
        # else:
        #     enriched_pop = enriched.TOTPOP[0]
        #
        # # Pull Samples From Configured Population Service
        # img_lyr = ImageryLayer(img_url, gis=geo_gis)
        # cells = img_lyr.properties.maxImageHeight * img_lyr.properties.maxImageWidth
        # samples = img_lyr.get_samples(
        #     row[1]['SHAPE'],
        #     geometry_type='esriGeometryPolygon',
        #     sample_count=cells
        # )
        # sample_total = sum([int(sample['value']) for sample in samples])
        #
        # # Push Significant Values Into List for Averaging
        # if enriched_pop or sample_total < 100:
        #     pass
        # else:
        #     diff = abs(enriched_pop - sample_total)
        #     if diff > 100:
        #         pop_diff.append(diff)
        #
        # tot_pop = enriched_pop if enriched_pop > 0 else sample_total
        # tot_pop = tot_pop if tot_pop > 0 else -1

        ##-----------------------------------------------------------------------------

        them_lyr = FeatureLayer(url=them_url, gis=them_gis)

        geom = Geometry(row[1].SHAPE).buffer(-.01)

        sp_filter = filters.intersects(geom, 4326)

        them_sdf = them_lyr.query(geometry_filter=sp_filter,
                                  return_all_records=True).df
        #print(them_sdf)

        if len(df_current) > 0:
            count = len(df_current)
            max_val = df_current[f_thm_acc].max()
            max_scale = 100 * (
                len(df_current[df_current[f_thm_acc] == max_val]) / count)
            min_val = df_current[f_thm_acc].min()
            min_scale = 100 * (
                len(df_current[df_current[f_thm_acc] == min_val]) / count)
            vc = df_current[f_thm_acc].value_counts()
            common = df_current[f_thm_acc].mode()  # Used in MSP
            mean = df_current[f_thm_acc].mean()
            if len(common) > 0:
                common = common[0]
                common_count = vc[common]
                common_per = (vc[common] / count) * 100
            else:
                common = min_val
                common_count = 1
                common_per = 100
            count_2500 = 0
            count_5000 = 0
            count_12500 = 0
            count_25000 = 0
            count_50000 = 0
            count_100000 = 0
            count_250000 = 0
            count_500000 = 0
            count_1000000 = 0
            if 2500 in vc:
                count_2500 = vc[2500]
            if 5000 in vc:
                count_5000 = vc[5000]
            if 12500 in vc:
                count_12500 = vc[12500]
            if 25000 in vc:
                count_25000 = vc[25000]
            if 50000 in vc:
                count_50000 = vc[50000]
            if 100000 in vc:
                count_100000 = vc[100000]
            if 250000 in vc:
                count_250000 = vc[250000]
            if 500000 in vc:
                count_500000 = vc[500000]
            if 1000000 in vc:
                count_1000000 = vc[1000000]

            MSP = get_msp(scale=common)  # SHOULD UPDATE MISSION_PLANNING FIELD

            if not out_sdf['MEAN'][0]:
                m = 0
            else:
                m = out_sdf['MEAN'][0]

            SCORE_VALUE = them_sdf['grls_score'].loc[
                0]  #get_equal_breaks_score(m)# get_equal_breaks_score(output_features, ['MEAN','EQUAL']) # PUT SCORE IN EQUAL

            #GRLS = SCORE_VALUE
            #domScale = common
            # FIELD 1 is the source, Field 2 is the field to be updated
            #df_current['EQUAL'] = SCORE_VALUE # ASSIGNS EQUAL TO LANSCAN_SCALE
            #29 field

            out_sdf.set_value(idx,
                              field_schema.get('them')[0], common)  # median
            out_sdf.set_value(idx,
                              field_schema.get('them')[1],
                              common_count)  # % common
            out_sdf.set_value(idx,
                              field_schema.get('them')[2],
                              round(common_per, 1))
            out_sdf.set_value(idx, field_schema.get('them')[3], min_val)
            out_sdf.set_value(idx,
                              field_schema.get('them')[4], round(min_scale, 1))
            out_sdf.set_value(idx, field_schema.get('them')[5], max_val)
            out_sdf.set_value(idx,
                              field_schema.get('them')[6], round(max_scale, 1))
            out_sdf.set_value(idx, field_schema.get('them')[7], count_2500)
            out_sdf.set_value(idx, field_schema.get('them')[8], count_5000)
            out_sdf.set_value(idx, field_schema.get('them')[9], count_12500)
            out_sdf.set_value(idx, field_schema.get('them')[10], count_25000)
            out_sdf.set_value(idx, field_schema.get('them')[11], count_50000)
            out_sdf.set_value(idx, field_schema.get('them')[12], count_100000)
            out_sdf.set_value(idx, field_schema.get('them')[13], count_250000)
            out_sdf.set_value(idx, field_schema.get('them')[14], count_500000)
            out_sdf.set_value(idx, field_schema.get('them')[15], count_1000000)
            out_sdf.set_value(idx,
                              field_schema.get('them')[16],
                              round(count_2500 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[17],
                              round(count_5000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[18],
                              round(count_12500 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[19],
                              round(count_25000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[20],
                              round(count_50000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[21],
                              round(count_100000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[22],
                              round(count_250000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[23],
                              round(count_500000 * 100 / count, 1))
            out_sdf.set_value(idx,
                              field_schema.get('them')[24],
                              round(count_1000000 * 100 / count, 1))
            out_sdf.set_value(idx, field_schema.get('them')[25], count)
            out_sdf.set_value(idx,
                              field_schema.get('them')[26],
                              str(MSP))  #MISSION_PLANNING FIELD
            out_sdf.set_value(idx,
                              field_schema.get('them')[27],
                              SCORE_VALUE)  #), # THEMATIC SCALE VALUE
            #out_sdf.set_value(idx, field_schema.get('them')[27], tot_pop)  # ), # THEMATIC SCALE VALUE
            out_sdf.set_value(idx,
                              field_schema.get('them')[28],
                              population_scale(
                                  common, SCORE_VALUE))  # POPULATION_SCALE
            out_sdf.set_value(idx, field_schema.get('them')[29], mean)
            #to 28

        else:
            for i in range(0, 25):
                out_sdf.set_value(idx, field_schema.get('them')[i], -1)

            out_sdf.set_value(idx, field_schema.get('them')[25], 0)
            out_sdf.set_value(idx, field_schema.get('them')[26], 'N/A')
            out_sdf.set_value(idx, field_schema.get('them')[27], 'N/A')
            out_sdf.set_value(idx, field_schema.get('them')[28], 0)
            out_sdf.set_value(idx, field_schema.get('them')[29], -1)

        del df_current

    #print('Average Difference of Population Estimates: {}'.format(np.average(pop_diff)))

    return out_sdf
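The run of "if 2500 in vc" checks above tallies features per map-scale denominator; the same bookkeeping can be sketched more compactly with a loop over the scale list. This is a stylistic alternative, not part of the original function.

# Hedged sketch: per-scale counts and percentages from a pandas value_counts() Series.
SCALE_DENOMINATORS = [2500, 5000, 12500, 25000, 50000, 100000, 250000, 500000, 1000000]

def scale_counts(vc, count):
    """Return ({scale: count}, {scale: percent}) for the configured scale denominators."""
    counts = {scale: int(vc[scale]) if scale in vc else 0 for scale in SCALE_DENOMINATORS}
    percents = {scale: round(counts[scale] * 100 / count, 1) for scale in SCALE_DENOMINATORS}
    return counts, percents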
def main():
    # Create log file
    with open(path.join(sys.path[0], 'attr_log.log'), 'a') as log:
        log.write('\n{}\n'.format(dt.now()))

        # connect to org/portal
        if username:
            gis = GIS(orgURL, username, password)
        else:
            gis = GIS(orgURL)

        for service in services:
            try:
                # Connect to source and target layers
                fl_source = FeatureLayer(service['source url'], gis)
                fl_target = FeatureLayer(service['target url'], gis)

                # get field map
                fields = [[key, service['fields'][key]] for key in service['fields'].keys()]

                # Get source rows to copy
                rows = fl_source.query(service['query'])
                adds = []
                updates = []

                for row in rows:
                    # Build dictionary of attributes & geometry in schema of target layer
                    # Default status and priority values can be overwritten if those fields are mapped to reporter layer
                    attributes = {'status': 0,
                                  'priority': 0}

                    for field in fields:
                        attributes[field[1]] = row.attributes[field[0]]

                    new_request = {'attributes': attributes,
                                   'geometry': {'x': row.geometry['x'],
                                                'y': row.geometry['y']}}
                    adds.append(new_request)

                    # update row to indicate record has been copied
                    if service['update field']:
                        row.attributes[service['update field']] = service['update value']
                        updates.append(row)

                # add records to target layer
                if adds:
                    add_result = fl_target.edit_features(adds=adds)
                    for result in add_result['addResults']:
                        if not result['success']:
                            raise Exception('error {}: {}'.format(result['error']['code'],
                                                                  result['error']['description']))

                # update records:
                if updates:
                    update_result = fl_source.edit_features(updates=updates)
                    for result in update_result['updateResults']:
                        if not result['success']:
                            raise Exception('error {}: {}'.format(result['error']['code'],
                                                                  result['error']['description']))

            except Exception as ex:
                msg = 'Failed to copy feature from layer {}'.format(service['source url'])
                print(ex)
                print(msg)
                log.write('{}\n{}\n'.format(msg, ex))
    data_item = gis.content.get(item_id)
    data_url = data_item.url

    # prompt user for which layer to export
    layers = data_item.layers
    layer_choice = 0
    if len(layers) > 1:

        # print out choices
        print("Choices for layers are:")
        for layer in layers:
            print("{}: {}".format(layer.properties.id,
                                  layer.properties.name))

        # prompt user for choice
        layer_choice = input("Index of layer: ")

    # construct layer url and get name
    layer_url = '/'.join([data_url, str(layer_choice)])

    # make feature layer, query, and then save
    feature_layer = FeatureLayer(layer_url, gis)
    layer_name = feature_layer.properties.name.replace(" ", "_")
    featureSet = feature_layer.query(where='1=1', out_fields='*')
    featureSet.save(directory, layer_name)

    print("completed export of {}".format(layer_name))

except Exception as e:
    print(e)
Example #28
0
class FeatureServiceHelper(FeatureSourceHelper):
    def __init__(self, source, id_field):
        super().__init__(source=source, id_field=id_field)
        self.layer = FeatureLayer(source)

    def query(self, fields=None, where_clause=None):
        if fields is None:
            fields = '*'
        elif isinstance(fields, list):
            fields = ','.join(fields)
        if where_clause is None:
            where_clause = '1=1'

        return self.layer.query(out_fields=fields, where=where_clause)

    def load_records(self, fields=None, where_clause=None):
        # .sdf converts the returned FeatureSet to a DataFrame before building the record dicts
        return self.query(fields=fields, where_clause=where_clause).sdf.to_dict('records')

    @staticmethod
    def _field_types(query_result):
        result = {}
        for field in query_result.fields:
            result[field['name']] = field['type']

        return result

    def field_names(self):
        return [field['name'] for field in self.layer.properties.fields]

    def update_records(self, new_data, fields=None, where_clause=None, add_new=False, delete_unmatched=False,
                       rounding=4, case_sensitive=True, shape_field='Shape'):
        """
        Updates the feature class using the new_data, with each record uniquely identified by the self.record_id_field.
        :param new_data: Dictionary records indexed by id.  {record_id: {field_name1: value1, field_name2: value2,...}}
        :type new_data: dict
        :param fields: The list of fields to be updated.  If an id_field is included, it will not be updated.
        :type fields:
        :param where_clause: An optional where clause to filter the updates.
        :type where_clause:
        :param add_new: If true, any records found in the new_data that do not have a corresponding record in the feature class will be added.
        :type add_new: bool
        :param delete_unmatched:  If true, any feature class records found that do not have a corresponding record in the new_data will be deleted.
        :type delete_unmatched: bool
        :param rounding: decimal rounding to be used when comparing double format numbers.
        :type rounding: int
        :param case_sensitive:
        :type case_sensitive:
        :param shape_field: The name of the geometry field in the new_data.  Set to None if geometry is not to be updated.  Default='Shape'
        :type shape_field: str
        :return:
        :rtype:
        """
        self._rounding = rounding
        self._case_sensitive = case_sensitive

        # use a local copy of new_data because we will be popping items.
        my_data = copy.deepcopy(new_data)

        if fields and self.id_field not in fields:
            fields.append(self.id_field)

        query_result = self.query(fields=fields, where_clause=where_clause)
        field_types = self._field_types(query_result)

        updates = []
        deletes = []

        for row in query_result:
            id_value = row.attributes[self.id_field]
            new_row = my_data.pop(id_value, None)
            if new_row:
                if self.update_row(row=row, field_types=field_types, new_values=new_row, shape_field=shape_field):
                    updates.append(row)
            elif delete_unmatched:
                deletes.append(row.attributes[query_result.object_id_field_name])

        adds = []
        if add_new:
            # any remaining items in my_data (unmatched new_data records) are new records
            for id_value, new_item in my_data.items():
                new_geometry = new_item.pop(shape_field, None)
                row = self.generate_new_row(new_item, new_geometry)
                adds.append(row)

        return self.update_layer(adds=adds, deletes=deletes, updates=updates)

    def update_layer(self, adds=None, deletes=None, updates=None, chunk_size=1000):
        """
        Performs updates on an arcgis feature service in manageable chunks.
        Updates are performed using multiple calls to the service where needed.  Large sets of updates are broken into
        smaller calls to avoid timeouts and other data size related issues.  Updates are executed in the following order:
        - Deletes
        - Adds
        - Updates
        If no elements are submitted for Adds, Deletes or Updates, then that stage of the process is skipped.

        :param adds: The list of add items to be added.
        :type adds:
        :param deletes:
        :type deletes:
        :param updates:
        :type updates:
        :param chunk_size:
        :type chunk_size:
        :return:
        :rtype:
        """
        logging.info('Updating: ' + self.source)
        result = {'adds': 0, 'deletes': 0, 'updates': 0}
        # perform updates in order deletes, adds, updates to support models where:
        # - updates are performed by deleting old records and replacing with new (remove old items before adding new)
        # - items can be added and updated in same cycles (ensure adds are in place before updates are applied)
        if deletes:
            for chunk in self._list_chunks(master_list=deletes, chunk_size=chunk_size):
                logging.info('Applying {} Deletes'.format(len(chunk)))
                self.layer.edit_features(deletes=str(chunk))
            result['deletes'] = len(deletes)
            logging.info('Total Deletes: {}'.format(result['deletes']))

        if adds:
            for chunk in self._list_chunks(master_list=adds, chunk_size=chunk_size):
                logging.info('Applying {} Adds'.format(len(chunk)))
                self.layer.edit_features(adds=chunk)
            result['adds'] = len(adds)
            logging.info('Total Adds: {}'.format(result['adds']))

        if updates:
            for chunk in self._list_chunks(master_list=updates, chunk_size=chunk_size):
                logging.info('Applying {} Updates'.format(len(chunk)))
                self.layer.edit_features(updates=chunk)
            result['updates'] = len(updates)
            logging.info('Total Updates: {}'.format(result['updates']))

        return result

    @staticmethod
    def update_date_field(row: Feature, field_name: str, new_value: datetime.date):
        current_date_value = datetime_utils.to_datetime(row.attributes[field_name])
        new_date_value = datetime_utils.to_datetime(new_value)

        if current_date_value == new_date_value:
            # if the values are the same, return False.
            return False

        # The values are different, Update the row.
        row.attributes[field_name] = new_date_value
        return True

    @staticmethod
    def update_int_field(row: Feature, field_name: str, new_value: int = None):
        if row.attributes[field_name] != new_value:
            row.attributes[field_name] = new_value
            return True

        return False

    def update_str_field(self, row: Feature, field_name: str, new_value: str = None):
        current_value = row.attributes[field_name]

        # if both are equal (str=str or None=None) return False.
        if current_value == new_value:
            return False

        # if (str and None) or (None and str)
        if not (current_value and new_value):
            row.attributes[field_name] = new_value
            return True

        # both values are non-identical strings.
        # if the test is not case sensitive and both UC strings match, no update needed
        if not self._case_sensitive and current_value.upper() == new_value.upper():
            return False

        # the strings are non-equivalent.  Update.
        row.attributes[field_name] = new_value
        return True

    def update_float_field(self, row: Feature, field_name: str, new_value: float = None):
        current_value = row.attributes[field_name]
        if current_value:
            current_value = round(current_value, self._rounding)  # round non zero, non-null values.

        if new_value:
            test_value = round(new_value, self._rounding)  # round non zero, non-null values.
        else:
            test_value = new_value

        if current_value == test_value:
            return False

        row.attributes[field_name] = new_value
        return True

    def update_field(self, row: Feature, field_name: str, field_type: str, new_value):
        ignore_types = ['esriFieldTypeOID', 'esriFieldTypeGeometry', 'esriFieldTypeBlob', 'esriFieldTypeRaster']
        if field_type in ['esriFieldTypeSmallInteger', 'esriFieldTypeInteger']:
            return self.update_int_field(row, field_name=field_name, new_value=new_value)
        elif field_type in ['esriFieldTypeSingle', 'esriFieldTypeDouble']:
            return self.update_float_field(row=row, field_name=field_name, new_value=new_value)
        elif field_type == 'esriFieldTypeString':
            return self.update_str_field(row=row, field_name=field_name, new_value=new_value)
        elif field_type == 'esriFieldTypeDate':
            return self.update_date_field(row=row, field_name=field_name, new_value=new_value)
        elif field_type in ignore_types:
            return False

        raise ValueError('Unhandled field type: ' + field_type)

    def update_row(self, row: Feature, field_types, new_values, shape_field=None):
        update_required = False
        for field_name in row.fields:
            if field_name != self.id_field and field_name in new_values:
                field_type = field_types[field_name]
                new_value = new_values[field_name]
                if self.update_field(row=row, field_name=field_name, field_type=field_type, new_value=new_value):
                    update_required = True
        #if shape_field:  TODO implement feature updates in a way that handles minor locational variations (nm)
        #
        #    new_wkt = self.to_wkt(new_values[shape_field])
        #    current_wkt = self.to_wkt(row.geometry)

        #    if current_wkt != new_wkt:
        #        row.geometry = self.to_geometry(new_values[shape_field])
        #        update_required = True

        return update_required

    @staticmethod
    def to_wkt(source):
        if source is None:
            return None
        if isinstance(source, Geometry):
            return source.WKT
        if isinstance(source, arcpy.Geometry):
            return source.WKT

        geom = Geometry(source)
        return geom.WKT

    @staticmethod
    def to_geometry(source):
        if source is None:
            return None

        if isinstance(source, Geometry):
            return source

        if isinstance(source, arcpy.Geometry):
            return Geometry(source.JSON)

        return Geometry(source)

    @staticmethod
    def generate_new_row(new_values, geometry=None):
        attributes = {}
        for field_name, value in new_values.items():
            attributes[field_name] = value

        geometry_value = geometry
        if geometry_value and isinstance(geometry_value, arcpy.Geometry):
            geometry_value = json.loads(geometry_value.JSON)

        return {"attributes": attributes,
                "geometry": geometry_value}

    @staticmethod
    def _list_chunks(master_list, chunk_size):
        """
        Yield successive chunk-sized chunks from master_list.
        A utility function to support other methods in this module.
        """
        for i in range(0, len(master_list), chunk_size):
            yield master_list[i:i + chunk_size]
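A hedged usage sketch for FeatureServiceHelper.update_records. The service URL, the STATION_ID key field, and the record values are placeholders, and it assumes the FeatureSourceHelper base class simply stores the source and id_field passed through.

import datetime

# Hypothetical update run: records are keyed by the unique id field, each with the field values to apply.
helper = FeatureServiceHelper(
    source="https://example.com/arcgis/rest/services/Stations/FeatureServer/0",  # placeholder URL
    id_field="STATION_ID")  # placeholder key field

new_data = {
    "ST-001": {"STATUS": "OPEN", "LAST_INSPECTED": datetime.date(2021, 6, 1)},
    "ST-002": {"STATUS": "CLOSED", "LAST_INSPECTED": datetime.date(2021, 5, 20)},
}

result = helper.update_records(new_data,
                               fields=["STATUS", "LAST_INSPECTED"],
                               shape_field=None)
print(result)  # e.g. {'adds': 0, 'deletes': 0, 'updates': 2}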
Example #29
0
def process_by_metadata(gis):
    return_all_records = False

    look_back_days = config.look_back_days

    dates = csl.get_dates_in_range(look_back_days)
    where_clause = csl.form_query_string(dates)

    grid_fl = FeatureLayer(url=config.grid_url)
    grid_sdf = grid_fl.query(return_all_records=return_all_records,
                             where=where_clause).df

    geometry = grid_sdf.geometry
    sr = {'wkid': 4326}
    sp_rel = "esriSpatialRelIntersects"

    for idx, row in enumerate(grid_sdf.iterrows()):
        geom = row[1].SHAPE

        new_geom = Geometry({
            "rings":
            [[[geom.extent.upperRight.X - .1, geom.extent.lowerLeft.Y + .1],
              [geom.extent.lowerLeft.X + .1, geom.extent.lowerLeft.Y + .1],
              [geom.extent.lowerLeft.X + .1, geom.extent.upperRight.Y - .1],
              [geom.extent.upperRight.X - .1, geom.extent.upperRight.Y - .1],
              [geom.extent.upperRight.X - .1, geom.extent.lowerLeft.Y + .1]]],
            "spatialReference": {
                "wkid": 4326
            }
        })

        grid_filter = filters._filter(new_geom, sr, sp_rel)
        sp_filter = filters._filter(geom, sr, sp_rel)

        data_fl = FeatureLayer(url=config.features_url)
        #out_fields=in_fields,
        data_sdf = data_fl.query(geometry_filter=sp_filter,
                                 return_geometry=True,
                                 return_all_records=return_all_records).df

        print('Processing Completeness')
        #bounding_box = '(37.708132, -122.513617, 37.832132, -122.349607)'
        bounding_box = '(' + \
                    str(geom.extent.lowerLeft.Y) + ',' + \
                    str(geom.extent.lowerLeft.X) + ',' + \
                    str(geom.extent.upperRight.Y) + ',' + \
                    str(geom.extent.upperRight.X) + ')'

        osm_sdf = runner.gen_osm_sdf('line',
                                     bounding_box,
                                     osm_tag='highway',
                                     present=True)
        completeness_sdf, completeness_fl = comp.completeness(
            gis, osm_sdf, data_sdf, config.completeness_url, grid_filter, geom)
        print(completeness_sdf)
        #update_features(them_acc_sdf, them_acc_fl)
        print('Completeness Updated')

        print('Processing Logical Consistency')
        lc_sdf, lc_fl = lc.logical_consisitency(
            gis, config.template_fc, config.template_gdb,
            config.attr_check_file, config.attr_check_tab, data_sdf,
            config.features_url, config.logical_consistency_url, grid_filter,
            geom, config.attr_error_field_count, config.attr_error_field_def)
        print(lc_sdf)
        update_features(lc_sdf, lc_fl)
        print('Logical Consistency Updated.')

        print('Processing temporal currency')
        tc_sdf, tc_fl = tc.temporal_currency(gis, data_sdf,
                                             config.currency_url, grid_filter,
                                             geom, config.currency_field)
        print(tc_sdf)
        #update_features(tc_sdf, tc_fl)
        print('Temporal Currency Updated')

        print('Processing source lineage')
        sl_sdf, sl_fl = sl.source_lineage(gis, data_sdf,
                                          config.source_lineage_url,
                                          grid_filter, geom,
                                          config.search_field,
                                          config.value_field)
        print(sl_sdf)
        #update_features(sl_sdf, sl_fl)
        print('Source Lineage Updated')

        print('Processing Positional Accuracy')
        pa_sdf, pa_fl = pa.positional_accuracy(gis, data_sdf,
                                               config.positional_acc_url,
                                               grid_filter, geom,
                                               config.positional_acc_field)
        print(pa_sdf)
        #update_features(pa_sdf, pa_fl)
        print('Positional Accuracy Updated')

        print('Processing Thematic Accuracy')
        them_acc_sdf, them_acc_fl = them_acc.thematic_accuracy(
            gis, data_sdf, config.thematic_url, grid_filter, geom,
            config.thematic_acc_field)
        print(them_acc_sdf)
        #update_features(them_acc_sdf, them_acc_fl)
        print('Thematic Accuracy Updated')

    return
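The filters._filter(...) calls above go through a private helper; the public arcgis.geometry.filters.intersects wrapper builds the same kind of intersects filter and could likely be used instead. A minimal, self-contained sketch (the point coordinates are illustrative):

from arcgis.geometry import Geometry, filters

# Hedged sketch: an intersects geometry filter built via the public helper instead of filters._filter.
demo_geom = Geometry({"x": 151.2, "y": -33.9, "spatialReference": {"wkid": 4326}})
demo_filter = filters.intersects(demo_geom, sr={'wkid': 4326})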
Example #30
0
import random

from arcgis.gis import GIS
from arcgis.features import FeatureLayer

gis = GIS("https://www.arcgis.com", "devlaahernandezgo", "fQ2HFMfk5ya7E9J")

url = 'https://services5.arcgis.com/otu0qUsUpyUjfoF3/arcgis/rest/services/informacion_lugar/FeatureServer/0'
layer = FeatureLayer(url)

for x in range(0, 9):
    body = {
        "geometry": {
            "objectId": 2,
            "x": -74.07,
            "y": 4.7 - random.random(),
            "spatialReference": {
                "wkid": 4326
            }
        },
        "attributes": {
            "edificio": "Aqui porfin",
            "nombre": "prueba",
            "numero_interno": "001",
            "maxima_ocupacion": "18",
            "tasa": random.randrange(0, 18),
            "tiempo_promedio": "30",
            "indice_bioseguro": "70"
        }
    }
    layer.edit_features(adds=[body])

print(layer.query().sdf)
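edit_features reports per-feature outcomes under 'addResults' (and 'updateResults'/'deleteResults' for the other operations); a hedged sketch of checking the response of the add call used in the loop above (note this performs one additional add with the last body from the loop):

# Hedged sketch: inspect the response of an edit_features(adds=...) call for failures.
response = layer.edit_features(adds=[body])
for add_result in response.get('addResults', []):
    if not add_result.get('success'):
        print("Add failed: {}".format(add_result.get('error')))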