Code example #1 (score: 0)
File: convert.py — Project: denkide/ColumbiaCarto
def table_to_points(
    dataset_path,
    output_path,
    x_field_name,
    y_field_name,
    spatial_reference_item=4326,
    **kwargs
):
    """Convert a coordinate table to a new point dataset.

    Args:
        dataset_path (str): Path of the dataset.
        output_path (str): Path of the output dataset.
        x_field_name (str): Name of field with x-coordinate.
        y_field_name (str): Name of field with y-coordinate.
        spatial_reference_item: Item from which the spatial reference of the output
            geometry will be derived. Default is 4326 (EPSG code for unprojected WGS84).
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        z_field_name (str): Name of the field with z-coordinate.
        log_level (str): Level to log the function at. Default is "info".

    Returns:
        str: Path of the converted dataset.
    """
    # Ensure optional keys exist (default to None) so lookups below cannot raise.
    for key in ["dataset_where_sql", "z_field_name"]:
        kwargs.setdefault(key)
    log = leveled_logger(LOG, kwargs.setdefault("log_level", "info"))
    log("Start: Convert %s to spatial dataset %s.", dataset_path, output_path)
    spatial_meta = spatial_reference_metadata(spatial_reference_item)
    # Event layers must have unique names within the session.
    layer_name = unique_name()
    arcpy.management.MakeXYEventLayer(
        table=dataset_path,
        out_layer=layer_name,
        in_x_field=x_field_name,
        in_y_field=y_field_name,
        in_z_field=kwargs["z_field_name"],
        spatial_reference=spatial_meta["object"],
    )
    # Persist the transient event layer as the output dataset, then discard it.
    dataset.copy(
        layer_name,
        output_path,
        dataset_where_sql=kwargs["dataset_where_sql"],
        log_level=None,
    )
    dataset.delete(layer_name, log_level=None)
    log("End: Convert.")
    return output_path
Code example #2 (score: 0)
File: etl.py — Project: denkide/ColumbiaCarto
    def init_schema(self, template_path=None, **kwargs):
        """Initialize dataset schema. Use only when extracting dataset.

        Keyword arguments that describe the schema are only referenced when
        template_path is undefined.

        Args:
            template_path (str): Path of the dataset to use as schema
                template.
            **kwargs: Arbitrary keyword arguments. See below.

        Keyword Args:
            field_metadata_list (iter): Field metadata mappings. Will be ignored if
                template_path used.
            geometry_type (str): Geometry type. Valid types are: point, multipoint,
                polygon, polyline. If unstated or another value, dataset will be
                nonspatial. Will be ignored if template_path used.
            spatial_reference_item: Item from which the spatial reference of the output
                geometry will be derived. Default is 4326 (EPSG code for unprojected
                WGS84). Will be ignored if template_path used.

        Returns:
            arcetl.etl.ArcETL: Reference to the instance.
        """
        LOG.info("Start: Initialize schema.")
        self.transform_path = unique_path("init")
        # No template: build the schema from the keyword descriptions instead.
        if not template_path:
            dataset.create(dataset_path=self.transform_path, log_level=None, **kwargs)
        else:
            # Copy only the schema (no features) from the template dataset.
            dataset.copy(
                dataset_path=template_path,
                output_path=self.transform_path,
                schema_only=True,
                log_level=None,
            )
        LOG.info("End: Initialize.")
        return self
Code example #3 (score: 0)
File: etl.py — Project: denkide/ColumbiaCarto
    def load(
        self, dataset_path, load_where_sql=None, preserve_features=False, **kwargs
    ):
        """Load features from transform- to load-dataset.

        Args:
            dataset_path (str): Path of dataset to load.
            load_where_sql (str): SQL where-clause for subselection from the
                transform-dataset.
            preserve_features (bool): Keep current features in load-dataset if True;
                remove them before adding transform-features if False.

        Keyword Args:
            use_edit_session (bool): Updates are done in an edit session if True.
                Default is False.

        Returns:
            arcetl.etl.ArcETL: Reference to the instance.
        """
        kwargs.setdefault("use_edit_session", False)
        LOG.info("Start: Load %s.", dataset_path)
        if not dataset.is_valid(dataset_path):
            # Target does not exist yet: create it as a filtered copy of the
            # transform-dataset.
            action_counts = dataset.copy(
                self.transform_path,
                output_path=dataset_path,
                dataset_where_sql=load_where_sql,
                log_level=None,
            )
        else:
            # Target exists: optionally clear it, then insert transform-features.
            action_counts = Counter()
            if not preserve_features:
                action_counts.update(
                    features.delete(dataset_path, log_level=None, **kwargs)
                )
            action_counts.update(
                features.insert_from_path(
                    dataset_path,
                    insert_dataset_path=self.transform_path,
                    insert_where_sql=load_where_sql,
                    use_edit_session=kwargs["use_edit_session"],
                    log_level=None,
                )
            )
        for action, count in sorted(action_counts.items()):
            LOG.info("%s features %s.", count, action)
        LOG.info("End: Load.")
        return self
Code example #4 (score: 0)
File: etl.py — Project: denkide/ColumbiaCarto
    def extract(self, dataset_path, extract_where_sql=None):
        """Extract features to transform workspace.

        Args:
            dataset_path (str): Path of the dataset to extract.
            extract_where_sql (str): SQL where-clause for extract subselection.

        Returns:
            arcetl.etl.ArcETL: Reference to the instance.
        """
        LOG.info("Start: Extract %s.", dataset_path)
        self.transform_path = unique_path("extract")
        # Copy the (optionally filtered) source into a fresh transform-dataset.
        action_counts = dataset.copy(
            dataset_path=dataset_path,
            output_path=self.transform_path,
            dataset_where_sql=extract_where_sql,
            log_level=None,
        )
        for action in sorted(action_counts):
            LOG.info("%s features %s.", action_counts[action], action)
        LOG.info("End: Extract.")
        return self
Code example #5 (score: 0)
File: network.py — Project: denkide/ColumbiaCarto
def generate_service_rings(
    dataset_path,
    output_path,
    network_path,
    cost_attribute,
    ring_width,
    max_distance,
    **kwargs
):
    """Create facility service ring features using a network dataset.

    Args:
        dataset_path (str): Path of the dataset.
        output_path (str): Path of the output service rings dataset.
        network_path (str): Path of the network dataset.
        cost_attribute (str): Name of the network cost attribute to use.
        ring_width (float): Distance a service ring represents in travel, in the
            dataset's units.
        max_distance (float): Distance in travel from the facility the outer ring will
            extend to, in the dataset's units.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        dataset_where_sql (str): SQL where-clause for dataset subselection.
        id_field_name (str): Name of facility ID field.
        restriction_attributes (iter): Collection of network restriction attribute
            names to use.
        travel_from_facility (bool): Flag to indicate performing the analysis
            travelling from (True) or to (False) the facility. Default is False.
        detailed_features (bool): Flag to generate high-detail features. Default is
            False.
        overlap_facilities (bool): Flag to overlap different facility service areas.
            Default is True.
        trim_value (float): Distance from the network features to trim service areas
            at.
        log_level (str): Level to log the function at. Default is 'info'.

    Returns:
        str: Path of the output service rings dataset.

    """
    # Ensure optional keys exist (default to None) so indexed lookups cannot raise.
    kwargs.setdefault('dataset_where_sql')
    kwargs.setdefault('id_field_name')
    kwargs.setdefault('restriction_attributes')
    kwargs.setdefault('travel_from_facility', False)
    kwargs.setdefault('detailed_features', False)
    kwargs.setdefault('overlap_facilities', True)
    kwargs.setdefault('trim_value')
    log = leveled_logger(LOG, kwargs.setdefault('log_level', 'info'))
    log("Start: Generate service rings for %s.", dataset_path)
    # trim_value assumes meters if not input as linear_unit string.
    if kwargs['trim_value'] is not None:
        trim_value = arcobj.linear_unit_string(kwargs['trim_value'], dataset_path)
    else:
        trim_value = None
    view = {'dataset': arcobj.DatasetView(dataset_path, kwargs['dataset_where_sql'])}
    # Network Analyst requires the extension to be checked out for the analysis.
    with arcobj.ArcExtension('Network'):
        arcpy.na.MakeServiceAreaLayer(
            in_network_dataset=network_path,
            out_network_analysis_layer='service_area',
            impedance_attribute=cost_attribute,
            travel_from_to=(
                'travel_from' if kwargs['travel_from_facility'] else 'travel_to'
            ),
            # Breaks at every ring_width step out to max_distance, as a
            # space-separated string (the format the tool expects).
            # NOTE(review): range() requires integer ring_width/max_distance here,
            # despite the docstring saying float — confirm intended input types.
            default_break_values=(
                ' '.join(
                    str(x) for x in range(ring_width, max_distance + 1, ring_width)
                )
            ),
            polygon_type=(
                'detailed_polys' if kwargs['detailed_features'] else 'simple_polys'
            ),
            # 'no_merge' keeps per-facility polygons (overlapping allowed);
            # 'no_overlap' splits shared coverage between facilities.
            merge=('no_merge' if kwargs['overlap_facilities'] else 'no_overlap'),
            nesting_type='rings',
            UTurn_policy='allow_dead_ends_and_intersections_only',
            restriction_attribute_name=kwargs['restriction_attributes'],
            polygon_trim=(True if trim_value else False),
            poly_trim_value=trim_value,
            hierarchy='no_hierarchy',
        )
        # Load facilities from the (optionally filtered) dataset view.
        with view['dataset']:
            arcpy.na.AddLocations(
                in_network_analysis_layer="service_area",
                sub_layer="Facilities",
                in_table=view['dataset'].name,
                field_mappings='Name {} #'.format(kwargs['id_field_name']),
                search_tolerance=max_distance,
                match_type='match_to_closest',
                append='clear',
                snap_to_position_along_network='no_snap',
                exclude_restricted_elements=True,
            )
        arcpy.na.Solve(
            in_network_analysis_layer="service_area",
            ignore_invalids=True,
            terminate_on_solve_error=True,
        )
    # Persist the solved polygons sublayer as the output, then drop the layer.
    dataset.copy('service_area/Polygons', output_path, log_level=None)
    dataset.delete('service_area', log_level=None)
    if kwargs['id_field_name']:
        # Re-attach the facility ID: the solver stores it in the text 'Name'
        # field, so convert it back to the source field's type.
        meta = {
            'id_field': arcobj.field_metadata(dataset_path, kwargs['id_field_name'])
        }
        dataset.add_field_from_metadata(output_path, meta['id_field'], log_level=None)
        attributes.update_by_function(
            output_path,
            meta['id_field']['name'],
            function=TYPE_ID_FUNCTION_MAP[meta['id_field']['type']],
            field_as_first_arg=False,
            arg_field_names=['Name'],
            log_level=None,
        )
    log("End: Generate.")
    return output_path