Example #1
    def test_int(self):
        name = "_int"
        utils.add_field_with_value(self.testfile, name,
                                   field_value=5)
        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.testfile)])

        newfield = arcpy.ListFields(self.testfile, name)[0]
        nt.assert_equal(newfield.type, u'Integer')
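These tests all exercise ``utils.add_field_with_value``, whose implementation is not shown on this page. As a rough sketch of what the tests imply (the type-inference map, the error when neither a value nor a field type is given, and the exact arcpy calls are assumptions, not the real implementation):

import arcpy

def add_field_with_value(table, field_name, field_value=None,
                         overwrite=False, field_type=None, field_length=None):
    # infer the field type from the value when none is given explicitly
    if field_type is None:
        if field_value is None:
            raise ValueError("field_type is required when no value is given")
        field_type = {int: 'LONG', float: 'DOUBLE'}.get(type(field_value), 'TEXT')

    # drop any existing field of the same name when overwriting is allowed
    if overwrite:
        arcpy.DeleteField_management(table, field_name)

    arcpy.AddField_management(table, field_name, field_type,
                              field_length=field_length)

    # seed every row with the provided value
    if field_value is not None:
        with arcpy.da.UpdateCursor(table, [field_name]) as cur:
            for row in cur:
                cur.updateRow([field_value])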
Example #2
    def test_float(self):
        name = "_float"
        utils.add_field_with_value(self.testfile, name,
                                   field_value=5.0)
        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.testfile)])

        newfield = arcpy.ListFields(self.testfile, name)[0]
        nt.assert_equal(newfield.type, u'Double')
Example #3
    def test_no_value_number(self):
        name = "_no_valnum"
        utils.add_field_with_value(self.testfile, name,
                                   field_type='DOUBLE')

        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.testfile)])

        newfield = arcpy.ListFields(self.testfile, name)[0]
        nt.assert_equal(newfield.type, u'Double')
Example #4
    def test_no_value_string(self):
        name = "_no_valstr"
        utils.add_field_with_value(self.testfile, name,
                                   field_type='TEXT',
                                   field_length=15)

        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.testfile)])

        newfield = arcpy.ListFields(self.testfile, name)[0]
        nt.assert_equal(newfield.type, u'String')
        nt.assert_equal(newfield.length, 15)
Example #5
    def test_unicode(self):
        name = "_unicode"
        utils.add_field_with_value(self.testfile, name,
                                   field_value=u"example_value",
                                   field_length=15)

        nt.assert_true(name in [f.name for f in arcpy.ListFields(self.testfile)])

        newfield = arcpy.ListFields(self.testfile, name)[0]
        nt.assert_equal(newfield.type, u'String')
        nt.assert_equal(newfield.length, 15)
Example #6
    def test_with_general_function(self):
        utils.add_field_with_value(self.testfile, self.field_added, field_type="LONG")
        utils.populate_field(
            self.testfile,
            lambda row: row[0]**2,
            self.field_added,
            ["FID"]
        )

        with arcpy.da.SearchCursor(self.testfile, [self.field_added, "FID"]) as cur:
            for row in cur:
                nt.assert_equal(row[0], row[1] ** 2)
Example #7
    def test_with_dictionary(self):
        value_dict = {n: n for n in range(7)}
        value_fxn = lambda row: value_dict.get(row[0], -1)
        utils.add_field_with_value(self.testfile, self.field_added, field_type="LONG")

        utils.populate_field(
            self.testfile,
            value_fxn,
            self.field_added,
            ["FID"]
        )

        with arcpy.da.SearchCursor(self.testfile, [self.field_added, "FID"]) as cur:
            for row in cur:
                nt.assert_equal(row[0], row[1])
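Examples #6 and #7 above drive ``utils.populate_field`` with a row function and a list of key fields. The real helper is not shown here; a minimal sketch consistent with how the tests call it (the cursor layout and argument order are assumptions) could be:

import arcpy

def populate_field(table, value_fxn, value_field, keyfields):
    # read the key fields plus the target field, compute each row's value
    # from the key fields only, and write it into the target field
    with arcpy.da.UpdateCursor(table, list(keyfields) + [value_field]) as cur:
        for row in cur:
            row[-1] = value_fxn(row[:-1])  # e.g., row[:-1] == [FID]
            cur.updateRow(row)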
Example #8
def accumulate(subcatchments_layer=None,
               id_col=None,
               ds_col=None,
               value_columns=None,
               streams_layer=None,
               output_layer=None,
               default_aggfxn='sum',
               ignored_value=None,
               verbose=False,
               asMessage=False):
    """
    Accumulate upstream subcatchment properties in each stream segment.

    Parameters
    ----------
    subcatchments_layer, streams_layer : str
        Names of the feature classes containing subcatchments and
        streams, respectively.
    id_col, ds_col : str
        Names of the fields in ``subcatchments_layer`` that contain the
        subcatchment ID and downstream subcatchment ID, respectively.
    value_columns : list of str
        Names of the fields in ``subcatchments_layer`` whose water
        quality scores and watershed properties should be propagated.
        Depending on its aggregation method, each column is accumulated
        by, e.g., summing (number of permit violations) or area-weighted
        averaging (percent impervious land cover).
    default_aggfxn : str, optional (default = 'sum')
        Name of the aggregation method applied to any value column that
        does not specify its own.
    ignored_value : float, optional
        Value that should be excluded from the aggregations (e.g., a
        missing-data sentinel).
    output_layer : str, optional
        Name of the new layer where the results should be saved.

    Returns
    -------
    output_layer : str
        Name of the new layer where the results were saved.

    See also
    --------
    propagator.analysis.aggregate_streams_by_subcatchment
    propagator.analysis.collect_upstream_attributes
    propagator.utils.rec_groupby

    """

    # Split each value column into field name, aggregation method, and
    # weighting factor
    value_columns = validate.value_column_stats(value_columns, default_aggfxn)
    value_columns_aggmethods = [i[1] for i in value_columns]
    vc_field_wfactor = []
    for col, aggmethod, wfactor in value_columns:
        if aggmethod.lower() == 'weighted_average':
            vc_field_wfactor.append([col, wfactor])
        else:
            vc_field_wfactor.append(col)
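    # e.g. (hypothetical values): if the validated ``value_columns`` were
    #   [('violations', 'sum', None), ('imp_pct', 'weighted_average', 'area')]
    # then ``vc_field_wfactor`` would be ['violations', ['imp_pct', 'area']].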

    # define the Statistic objects that will be passed to `rec_groupby`
    statfxns = []
    for agg in value_columns_aggmethods:
        statfxns.append(
            partial(utils.stats_with_ignored_values,
                    statfxn=analysis.AGG_METHOD_DICT[agg.lower()],
                    ignored_value=ignored_value))

    res_columns = [
        '{}{}'.format(prefix[:3].upper(), col)
        for col, prefix, _ in value_columns
    ]
    stats = [
        utils.Statistic(srccol, statfxn,
                        rescol) for srccol, statfxn, rescol in zip(
                            vc_field_wfactor, statfxns, res_columns)
    ]

    # create a unique list of columns we need
    # from the subcatchment layer
    target_fields = []
    for s in stats:
        if numpy.isscalar(s.srccol):
            target_fields.append(s.srccol)
        else:
            target_fields.extend(s.srccol)
    target_fields = numpy.unique(target_fields)

    # split the stream at the subcatchment boundaries and then
    # aggregate all of the stream w/i each subcatchment
    # into single geometries/records.
    split_streams_layer = analysis.aggregate_streams_by_subcatchment(
        stream_layer=streams_layer,
        subcatchment_layer=subcatchments_layer,
        id_col=id_col,
        ds_col=ds_col,
        other_cols=target_fields,
        output_layer=output_layer,
        agg_method="first",  # first works b/c all values are equal
    )

    # Add the result columns (``final_fields``) to ``split_streams_layer``.
    final_fields = [s.rescol for s in stats]
    for field in final_fields:
        utils.add_field_with_value(
            table=split_streams_layer,
            field_name=field,
            field_value=None,
            field_type='DOUBLE',
        )

    # load the split/aggregated streams' attribute table
    split_streams_table = utils.load_attribute_table(split_streams_layer,
                                                     id_col, ds_col,
                                                     *final_fields)

    # load the subcatchment attribute table
    subcatchments_table = utils.load_attribute_table(subcatchments_layer,
                                                     id_col, ds_col,
                                                     *target_fields)

    upstream_attributes = analysis.collect_upstream_attributes(
        subcatchments_table=subcatchments_table,
        target_subcatchments=split_streams_table,
        id_col=id_col,
        ds_col=ds_col,
        preserved_fields=target_fields)
    aggregated_properties = utils.rec_groupby(upstream_attributes, id_col,
                                              *stats)

    # Update output layer with aggregated values.
    utils.update_attribute_table(
        layerpath=split_streams_layer,
        attribute_array=aggregated_properties,
        id_column=id_col,
        orig_columns=final_fields,
    )

    # Remove extraneous columns
    required_columns = [
        id_col, ds_col, 'FID', 'Shape', 'Shape_Length', 'Shape_Area',
        'OBJECTID'
    ]
    fields_to_remove = [
        f.name for f in arcpy.ListFields(split_streams_layer)
        if f.name not in required_columns and f.name not in final_fields
    ]
    utils.delete_columns(split_streams_layer, *fields_to_remove)

    return split_streams_layer
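
For orientation, a hedged usage sketch of ``accumulate``; every layer, field, and column name below is hypothetical, and the exact ``value_columns`` syntax is whatever ``validate.value_column_stats`` accepts:

result_layer = accumulate(
    subcatchments_layer='subcatchments',      # hypothetical feature class
    id_col='Catch_ID',                        # hypothetical ID field
    ds_col='DS_ID',                           # hypothetical downstream ID field
    value_columns=['violations', 'imp_pct'],  # placeholder columns
    streams_layer='streams',
    output_layer='accumulated_streams',
    ignored_value=-9999,                      # assumed missing-data sentinel
)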
Example #9
    def test_overwrite_existing_yes(self):
        utils.add_field_with_value(self.testfile, "existing",
                                   overwrite=True,
                                   field_type="LONG")
Example #10
    def test_overwrite_existing_no(self):
        utils.add_field_with_value(self.testfile, "existing")
Example #11
    def test_no_value_no_field_type(self):
        # expected to fail: neither a field value nor a field type is given
        utils.add_field_with_value(self.testfile, "_willfail")