    def test_populate_node_family_full_parentage_complex_dependencies(self) -> None:
        view_1 = BigQueryView(
            dataset_id="dataset_1",
            view_id="table_1",
            description="table_1 description",
            view_query_template="SELECT * FROM `{project_id}.source_dataset.source_table`",
        )
        view_2 = BigQueryView(
            dataset_id="dataset_2",
            view_id="table_2",
            description="table_2 description",
            view_query_template="SELECT * FROM `{project_id}.dataset_1.table_1`",
        )
        view_3 = BigQueryView(
            dataset_id="dataset_3",
            view_id="table_3",
            description="table_3 description",
            view_query_template="""
                           SELECT * FROM `{project_id}.dataset_1.table_1`
                           JOIN `{project_id}.dataset_2.table_2`
                           USING (col)""",
        )
        view_4 = BigQueryView(
            dataset_id="dataset_4",
            view_id="table_4",
            description="table_4 description",
            view_query_template="""
                           SELECT * FROM `{project_id}.dataset_2.table_2`
                           JOIN `{project_id}.dataset_3.table_3`
                           USING (col)""",
        )

        dag_walker = BigQueryViewDagWalker([view_1, view_2, view_3, view_4])
        start_node = dag_walker.node_for_view(view_4)

        dag_walker.populate_node_family_for_node(
            node=start_node, view_source_table_datasets={"source_dataset"}
        )
        expected_parent_nodes = {
            DagKey(
                view_address=BigQueryAddress(
                    dataset_id="source_dataset", table_id="source_table"
                )
            ),
            DagKey.for_view(view_1),
            DagKey.for_view(view_2),
            DagKey.for_view(view_3),
        }
        self.assertEqual(expected_parent_nodes, start_node.node_family.full_parentage)


class CalculationDocumentationGenerator:
    """A class for generating documentation about our calculations."""

    def __init__(
        self,
        products: List[ProductConfig],
        root_calc_docs_dir: str,
    ):
        self.root_calc_docs_dir = root_calc_docs_dir
        self.products = products

        self.states_by_product = self.get_states_by_product()

        # Reverses the states_by_product dictionary
        self.products_by_state: Dict[
            StateCode, Dict[GCPEnvironment, List[ProductName]]
        ] = defaultdict(lambda: defaultdict(list))

        for product_name, environments_to_states in self.states_by_product.items():
            for environment, states in environments_to_states.items():
                for state in states:
                    self.products_by_state[state][environment].append(
                        product_name)
        self.dag_walker = BigQueryViewDagWalker(
            _build_views_to_update(
                view_source_table_datasets=VIEW_SOURCE_TABLE_DATASETS,
                candidate_view_builders=DEPLOYED_VIEW_BUILDERS,
                dataset_overrides=None,
                override_should_build_predicate=True,
            ))
        self.prod_templates_yaml = YAMLDict.from_path(
            PRODUCTION_TEMPLATES_PATH)

        self.daily_pipelines = self.prod_templates_yaml.pop_dicts(
            "daily_pipelines")
        self.historical_pipelines = self.prod_templates_yaml.pop_dicts(
            "historical_pipelines")

        self.metric_calculations_by_state = self._get_state_metric_calculations(
            self.daily_pipelines, "daily")
        # combine with the historical pipelines
        for name, metric_info_list in self._get_state_metric_calculations(
                self.historical_pipelines,
                "triggered by code changes").items():
            self.metric_calculations_by_state[name].extend(metric_info_list)

        # Reverse the metric_calculations_by_state dictionary
        self.state_metric_calculations_by_metric: Dict[
            str, List[StateMetricInfo]] = defaultdict(list)
        for state_name, metric_info_list in self.metric_calculations_by_state.items():
            for metric_info in metric_info_list:
                self.state_metric_calculations_by_metric[
                    metric_info.name].append(
                        StateMetricInfo(
                            name=state_name,
                            month_count=metric_info.month_count,
                            frequency=metric_info.frequency,
                        ))

        self.metrics_by_generic_types = self._get_metrics_by_generic_types()

        self.generic_types_by_metric_name = {}
        for generic_type, metric_list in self.metrics_by_generic_types.items():
            for metric in metric_list:
                self.generic_types_by_metric_name[
                    DATAFLOW_METRICS_TO_TABLES[metric]] = generic_type

        def _preprocess_views(
                v: BigQueryView,
                _parent_results: Dict[BigQueryView, None]) -> None:
            dag_key = DagKey(view_address=v.address)
            node = self.dag_walker.nodes_by_key[dag_key]

            # Fills out full child/parent dependencies and tree representations for use
            # in various sections.
            self.dag_walker.populate_node_family_for_node(
                node=node,
                datasets_to_skip={DATAFLOW_METRICS_MATERIALIZED_DATASET}
                | RAW_TABLE_DATASETS,
                custom_node_formatter=self._dependency_tree_formatter_for_gitbook,
                view_source_table_datasets=VIEW_SOURCE_TABLE_DATASETS
                | LATEST_VIEW_DATASETS,
            )

        self.dag_walker.process_dag(_preprocess_views)
        self.all_views_to_document = self._get_all_views_to_document()

    def _get_all_export_config_view_builder_addresses(
            self) -> Set[BigQueryAddress]:
        """Returns the set of BQ addresses for all views exported by any product."""
        all_export_view_builder_addresses: Set[BigQueryAddress] = set()
        for product in self.products:
            all_export_view_builder_addresses |= (
                self._get_all_config_view_addresses_for_product(product))
        return all_export_view_builder_addresses

    def get_states_by_product(
            self) -> Dict[ProductName, Dict[GCPEnvironment, List[StateCode]]]:
        """Returns the dict of products to states and environments."""
        states_by_product: Dict[ProductName, Dict[
            GCPEnvironment,
            List[StateCode]]] = defaultdict(lambda: defaultdict(list))
        for product in self.products:
            if product.states is not None:
                for state in product.states:
                    environment = GCPEnvironment(state.environment)
                    state_code = StateCode(state.state_code)
                    states_by_product[product.name][environment].append(
                        state_code)

        return states_by_product

    @staticmethod
    def bulleted_list(string_list: List[str],
                      tabs: int = 1,
                      escape_underscores: bool = True) -> str:
        """Returns a string holding a bulleted list of the input string list."""
        return "\n".join([
            f"{'  '*tabs}- {s.replace('__', ESCAPED_DOUBLE_UNDERSCORE) if escape_underscores else s}"
            for s in string_list
        ])
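
    # A sketch of the helper above (illustrative, not part of the original
    # file): each entry gets two spaces of indentation per tab level and a
    # "- " prefix.
    #
    #   >>> CalculationDocumentationGenerator.bulleted_list(
    #   ...     ["first", "second"], tabs=2, escape_underscores=False)
    #   '    - first\n    - second'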

    def _get_dataflow_pipeline_enabled_states(self) -> Set[StateCode]:
        """Returns the set of StateCodes for all states present in our production calc
        pipeline template."""
        states = {
            pipeline.peek("state_code", str).upper()
            for pipeline in self.daily_pipelines
        }.union({
            pipeline.peek("state_code", str).upper()
            for pipeline in self.historical_pipelines
        })

        for state_code in states:
            if not StateCode.is_state_code(state_code):
                raise ValueError(
                    f"Found invalid state code value [{state_code}]"
                    f" in pipeline template config.")

        return {StateCode(state_code) for state_code in states}

    def _get_product_enabled_states(self) -> Set[StateCode]:
        """Returns the set of StateCodes for all states configured for any product."""
        states: Set[str] = set()
        for product in self.products:
            if product.states is not None:
                states = states.union(
                    {state.state_code for state in product.states})

        for state_code in states:
            if not StateCode.is_state_code(state_code):
                raise ValueError(
                    f"Found invalid state code value [{state_code}]"
                    f" in product config.")
        return {StateCode(state_code) for state_code in states}

    def _get_calculation_states_summary_str(self) -> str:
        states = self._get_dataflow_pipeline_enabled_states().union(
            self._get_product_enabled_states())

        state_names = [str(state_code.get_state()) for state_code in states]
        header = "- States\n"
        return header + self.bulleted_list(
            sorted([
                f"[{state_name}](calculation/states/{self._normalize_string_for_path(state_name)}.md)"
                for state_name in state_names
            ]))

    def _get_products_summary_str(self) -> str:
        header = "\n- Products\n"

        product_names = sorted([product.name for product in self.products])
        return header + self.bulleted_list([
            f"[{product_name}](calculation/products/"
            f"{self._normalize_string_for_path(product_name)}/"
            f"{self._normalize_string_for_path(product_name)}_summary.md)"
            for product_name in product_names
        ])

    def _get_views_summary_str(self) -> str:
        header = "\n- Views"
        bullets = ""
        for dataset_id, dag_key_list in self._get_keys_by_dataset(
                self.all_views_to_document).items():
            bullets += f"\n  - {dataset_id}\n"
            bullets += self.bulleted_list(
                [
                    f"[{dag_key.table_id.replace('__', ESCAPED_DOUBLE_UNDERSCORE)}](calculation/views/{dataset_id}/{dag_key.table_id}.md)"
                    for dag_key in dag_key_list
                ],
                tabs=2,
                escape_underscores=False,
            )

        return header + bullets + "\n"

    @staticmethod
    def _get_metrics_by_generic_types(
    ) -> Dict[str, List[Type[RecidivizMetric]]]:
        metrics_dict: Dict[str, List[Type[RecidivizMetric]]] = defaultdict(list)

        for metric in DATAFLOW_METRICS_TO_TABLES:
            if issubclass(metric, SupervisionMetric):
                metrics_dict["Supervision"].append(metric)
            elif issubclass(metric, ReincarcerationRecidivismMetric):
                metrics_dict["Recidivism"].append(metric)
            elif issubclass(metric, ProgramMetric):
                metrics_dict["Program"].append(metric)
            elif issubclass(metric, IncarcerationMetric):
                metrics_dict["Incarceration"].append(metric)
            elif issubclass(metric, ViolationMetric):
                metrics_dict["Violation"].append(metric)
            else:
                raise ValueError(
                    f"{metric.__name__} is not a subclass of an expected"
                    f" metric type.")

        return metrics_dict

    def _get_dataflow_metrics_summary_str(self) -> str:
        """Returns the Dataflow metrics section of the calculation catalog summary."""
        dataflow_str = "\n- Dataflow Metrics\n"

        for header, class_list in self.metrics_by_generic_types.items():
            dataflow_str += f"  - {header.upper()}\n"

            dataflow_str += (self.bulleted_list(
                [
                    f"[{metric.__name__}](calculation/metrics/{header.lower()}/{DATAFLOW_METRICS_TO_TABLES[metric]}.md)"
                    for metric in class_list
                ],
                2,
            ) + "\n")

        return dataflow_str

    def generate_summary_strings(self) -> List[str]:
        logging.info("Generating calculation summary markdown")
        calculation_catalog_summary = ["## Calculation Catalog\n\n"]

        calculation_catalog_summary.extend(
            [self._get_calculation_states_summary_str()])
        calculation_catalog_summary.extend([self._get_products_summary_str()])
        calculation_catalog_summary.extend([self._get_views_summary_str()])
        calculation_catalog_summary.extend(
            [self._get_dataflow_metrics_summary_str()])

        return calculation_catalog_summary

    def products_list_for_env(self, state_code: StateCode,
                              environment: GCPEnvironment) -> str:
        """Returns a bulleted list of products launched in the state in the given environment."""
        if environment not in {
                GCPEnvironment.PRODUCTION, GCPEnvironment.STAGING
        }:
            raise ValueError(f"Unexpected environment: [{environment.value}]")

        if (state_code not in self.products_by_state
                or environment not in self.products_by_state[state_code]
                or not self.products_by_state[state_code][environment]):
            return "N/A"

        return self.bulleted_list([
            f"[{product}](../products/{self._normalize_string_for_path(product)}/{self._normalize_string_for_path(product)}_summary.md)"
            for product in self.products_by_state[state_code][environment]
        ])

    def states_list_for_env(self, product: ProductConfig,
                            environment: GCPEnvironment) -> str:
        """Returns a bulleted list of states where a product is launched in the given environment."""
        if environment not in {
                GCPEnvironment.PRODUCTION, GCPEnvironment.STAGING
        }:
            raise ValueError(f"Unexpected environment: [{environment.value}]")
        states_list = [
            f"[{str(state_code.get_state())}](../../states/{self._normalize_string_for_path(str(state_code.get_state()))}.md)"
            for state_code in self.states_by_product[product.name][environment]
        ]

        return self.bulleted_list(states_list) if states_list else "  N/A"

    def _get_shipped_states_str(self, product: ProductConfig) -> str:
        """Returns a string containing lists of shipped states and states in development
        for a given product."""

        shipped_states_str = self.states_list_for_env(
            product, GCPEnvironment.PRODUCTION)

        development_states_str = self.states_list_for_env(
            product, GCPEnvironment.STAGING)

        return ("##SHIPPED STATES\n" + shipped_states_str +
                "\n\n## STATES IN DEVELOPMENT\n" + development_states_str +
                "\n\n")

    @staticmethod
    def _get_keys_by_dataset(dag_keys: Set[DagKey]) -> Dict[str, List[DagKey]]:
        """Given a set of DagKeys, returns a sorted dictionary of those keys, organized by dataset."""
        datasets_to_views = defaultdict(list)
        for key in sorted(
                dag_keys,
                key=lambda dag_key: (dag_key.dataset_id, dag_key.table_id)):
            datasets_to_views[key.dataset_id].append(key)
        return datasets_to_views
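
    # A sketch of the grouping above (hypothetical DagKeys for illustration).
    # Because the keys are globally sorted by (dataset_id, table_id) before
    # insertion, both the dataset buckets and the keys within each bucket come
    # out in sorted order:
    #
    #   keys = {key_b2, key_a3, key_a1}  # dataset_b.table_2, dataset_a.table_3, ...
    #   _get_keys_by_dataset(keys)
    #   # -> {"dataset_a": [key_a1, key_a3], "dataset_b": [key_b2]}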

    def _get_dataset_headers_to_views_str(
            self,
            dag_keys: Set[DagKey],
            source_tables_section: bool = False) -> str:
        """Given a set of DagKeys, returns a str list of those views, organized by dataset."""
        datasets_to_keys = self._get_keys_by_dataset(dag_keys)
        views_str = ""
        for dataset, keys in datasets_to_keys.items():
            views_str += f"####{dataset}\n"
            views_str += (
                f"_{VIEW_SOURCE_TABLE_DATASETS_TO_DESCRIPTIONS[dataset]}_\n"
                if source_tables_section else "")
            views_str += (self.bulleted_list(
                [
                    self._dependency_tree_formatter_for_gitbook(
                        dag_key, products_section=True) for dag_key in keys
                ],
                escape_underscores=False,
            )) + "\n\n"

        return views_str

    def _get_views_str_for_product(self, view_keys: Set[DagKey]) -> str:
        """Returns the string containing the VIEWS section of the product markdown."""
        views_header = "##VIEWS\n\n"
        if not view_keys:
            return views_header + "*This product does not use any BigQuery views.*\n\n"
        return views_header + self._get_dataset_headers_to_views_str(view_keys)

    def _get_source_tables_str_for_product(self,
                                           source_keys: Set[DagKey]) -> str:
        """Returns the string containing the SOURCE TABLES section of the product markdown."""
        source_tables_header = (
            "##SOURCE TABLES\n"
            "_Reference views that are used by other views. Some need to be updated manually._\n\n"
        )
        if not source_keys:
            return (source_tables_header +
                    "*This product does not reference any source tables.*\n\n")
        return source_tables_header + self._get_dataset_headers_to_views_str(
            source_keys, source_tables_section=True)

    def _get_metrics_str_for_product(self, metric_keys: Set[DagKey]) -> str:
        """Builds the Metrics string for the product markdown file. Creates a table of
        necessary metric types and whether a state calculates those metrics"""
        metrics_header = (
            "##METRICS\n_All metrics required to support this product and"
            " whether or not each state regularly calculates the metric._"
            "\n\n** DISCLAIMER **\nThe presence of all required metrics"
            " for a state does not guarantee that this product is ready to"
            " launch in that state.\n\n")

        if not metric_keys:
            return (metrics_header +
                    "*This product does not rely on Dataflow metrics.*\n")
        state_codes = sorted(self._get_dataflow_pipeline_enabled_states(),
                             key=lambda code: code.value)

        headers = ["**Metric**"] + [
            f"**{state_code.value}**" for state_code in state_codes
        ]

        table_matrix = []
        for metric_key in sorted(metric_keys,
                                 key=lambda dag_key: dag_key.table_id):
            metric_value = DATAFLOW_TABLES_TO_METRIC_TYPES[
                metric_key.table_id].value
            generic_type = self.generic_types_by_metric_name[
                metric_key.table_id].lower()
            metric_link = (f"[{metric_value}](../../metrics/{generic_type}/"
                           f"{metric_key.table_id}.md)")
            row = [metric_link]
            for state_code in state_codes:
                state_metrics = self.metric_calculations_by_state[str(
                    state_code.get_state())]
                row.append("X" if any(metric.name == metric_value
                                      for metric in state_metrics) else "")
            table_matrix.append(row)

        writer = MarkdownTableWriter(headers=headers,
                                     value_matrix=table_matrix,
                                     margin=0)
        return metrics_header + writer.dumps()
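
    # The writer above emits a plain markdown table; roughly (hypothetical
    # values, exact pytablewriter alignment may differ):
    #
    #   |**Metric**|**US_XX**|**US_YY**|
    #   |----------|---------|---------|
    #   |[INCARCERATION_ADMISSION](../../metrics/incarceration/...)|X| |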

    @staticmethod
    def _get_all_config_view_addresses_for_product(
            product: ProductConfig) -> Set[BigQueryAddress]:
        """Returns a set containing a BQ address for each view listed by each export
        necessary for the given product."""
        all_config_view_addresses: Set[BigQueryAddress] = set()
        for export in product.exports:
            collection_config = VIEW_COLLECTION_EXPORT_INDEX[export]
            view_builders = collection_config.view_builders_to_export

            all_config_view_addresses = all_config_view_addresses.union({
                BigQueryAddress(
                    dataset_id=view_builder.dataset_id,
                    table_id=view_builder.view_id,
                )
                for view_builder in view_builders
            })

        return all_config_view_addresses

    def _get_all_parent_keys_for_product(
            self, product: ProductConfig) -> Set[DagKey]:
        """Returns a set containing a DagKey for every view that this product relies upon. """
        all_config_view_addresses = self._get_all_config_view_addresses_for_product(
            product)

        all_parent_keys: Set[DagKey] = set()
        for view_address in all_config_view_addresses:
            dag_key = DagKey(view_address=view_address)
            node = self.dag_walker.nodes_by_key[dag_key]
            # Add in the top level view
            all_parent_keys.add(dag_key)
            # Add in all ancestors
            all_parent_keys = all_parent_keys.union(
                node.node_family.full_parentage)

        # Ignore materialized metric views as relevant metric info can be found in a
        # different dataset (DATAFLOW_METRICS_DATASET).
        all_parent_keys.difference_update({
            key
            for key in all_parent_keys
            if key.dataset_id == DATAFLOW_METRICS_MATERIALIZED_DATASET
        })
        return all_parent_keys

    def _get_product_information(self, product: ProductConfig) -> str:
        """Returns a string containing all relevant information for a given product
        including name, views used, source tables, and required metrics."""

        documentation = f"#{product.name.upper()}\n"
        documentation += product.description + "\n"

        documentation += self._get_shipped_states_str(product)

        all_parent_keys = self._get_all_parent_keys_for_product(product)

        source_keys = {
            key
            for key in all_parent_keys
            # Metric info will be included in the metric-specific section
            if key.dataset_id in OTHER_SOURCE_TABLE_DATASETS -
            {DATAFLOW_METRICS_DATASET}
        }

        metric_keys = {
            key
            for key in all_parent_keys
            if key.dataset_id == DATAFLOW_METRICS_DATASET
        }

        # Remove metric keys as they are surfaced in a metric-specific section. Remove
        # source table keys as they are surfaced in a reference-specific section
        view_keys = all_parent_keys - metric_keys - source_keys

        documentation += self._get_views_str_for_product(view_keys)
        documentation += self._get_source_tables_str_for_product(source_keys)
        documentation += self._get_metrics_str_for_product(metric_keys)

        return documentation

    @staticmethod
    def _normalize_string_for_path(target_string: str) -> str:
        """Returns a lowercase, underscore-separated string."""
        return target_string.lower().replace(" ", "_")
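
    # For example: _normalize_string_for_path("My Product Name") -> "my_product_name".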

    def generate_products_markdowns(self) -> bool:
        """Generates markdown files if necessary for the docs/calculation/products
        directories"""
        anything_modified = False
        for product in self.products:
            # Generate documentation for each product
            documentation = self._get_product_information(product)

            # Write documentation to markdown files
            product_name_for_path = self._normalize_string_for_path(
                product.name)
            product_dir_path = os.path.join(self.root_calc_docs_dir,
                                            "products", product_name_for_path)
            os.makedirs(product_dir_path, exist_ok=True)

            product_markdown_path = os.path.join(
                product_dir_path,
                f"{product_name_for_path}_summary.md",
            )

            anything_modified |= persist_file_contents(documentation,
                                                       product_markdown_path)
        return anything_modified

    @staticmethod
    def _get_state_metric_calculations(
            pipelines: List[YAMLDict],
            frequency: str) -> Dict[str, List[PipelineMetricInfo]]:
        """Returns a dict of state names to lists of info about their regularly
        calculated metrics."""
        state_metric_calculations = defaultdict(list)
        for pipeline in pipelines:
            state_name = str(
                StateCode(pipeline.peek("state_code", str)).get_state())
            state_metric_calculations[state_name].extend([
                PipelineMetricInfo(
                    name=metric,
                    month_count=pipeline.peek_optional(
                        "calculation_month_count", int),
                    frequency=frequency,
                ) for metric in pipeline.peek("metric_types", str).split()
            ])
        return state_metric_calculations
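
    # A sketch of the transformation above, assuming a hypothetical pipeline
    # entry (the YAML keys match the peek()/peek_optional() calls; the dict key
    # is the human-readable name from StateCode.get_state()):
    #
    #   state_code: US_XX
    #   metric_types: INCARCERATION_ADMISSION SUPERVISION_POPULATION
    #   calculation_month_count: 36
    #
    # -> {str(StateCode("US_XX").get_state()): [
    #        PipelineMetricInfo(name="INCARCERATION_ADMISSION", month_count=36,
    #                           frequency="daily"),
    #        PipelineMetricInfo(name="SUPERVISION_POPULATION", month_count=36,
    #                           frequency="daily"),
    #    ]}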

    def _get_sorted_state_metric_info(
            self) -> Dict[str, List[PipelineMetricInfo]]:
        """Returns a dictionary of state names (in alphabetical order) to their
        regularly calculated metric information (sorted by metric name)"""
        sorted_state_metric_calculations: Dict[
            str, List[PipelineMetricInfo]] = {
                state_name_key: sorted(
                    self.metric_calculations_by_state[state_name_key],
                    key=lambda info: info.name,
                )
                for state_name_key in sorted(self.metric_calculations_by_state)
            }
        return sorted_state_metric_calculations

    def _get_metrics_table_for_state(self, state_name: str) -> str:
        sorted_state_metric_calculations = self._get_sorted_state_metric_info()
        metric_names_to_tables = {
            metric.value: table
            for table, metric in DATAFLOW_TABLES_TO_METRIC_TYPES.items()
        }
        if state_name in sorted_state_metric_calculations:
            headers = [
                "**Metric**",
                "**Number of Months Calculated**",
                "**Calculation Frequency**",
            ]
            table_matrix = [[
                f"[{metric_info.name}](../metrics/{self.generic_types_by_metric_name[metric_names_to_tables[metric_info.name]].lower()}/{metric_names_to_tables[metric_info.name]}.md)",
                metric_info.month_count if metric_info.month_count else "N/A",
                metric_info.frequency,
            ] for metric_info in sorted_state_metric_calculations[state_name]]
            writer = MarkdownTableWriter(headers=headers,
                                         value_matrix=table_matrix,
                                         margin=0)
            return writer.dumps()
        return "_This state has no regularly calculated metrics._"

    def _get_state_information(self, state_code: StateCode,
                               state_name: str) -> str:
        """Returns string contents for the state markdown."""
        documentation = f"#{state_name}\n\n"

        # Products section
        documentation += "##Shipped Products\n\n"
        documentation += self.products_list_for_env(state_code,
                                                    GCPEnvironment.PRODUCTION)
        documentation += "\n\n##Products in Development\n\n"
        documentation += self.products_list_for_env(state_code,
                                                    GCPEnvironment.STAGING)

        # Metrics section
        documentation += "\n\n##Regularly Calculated Metrics\n\n"

        documentation += self._get_metrics_table_for_state(state_name)

        return documentation

    def generate_states_markdowns(self) -> bool:
        """Generate markdown files for each state."""
        anything_modified = False

        states_dir_path = os.path.join(self.root_calc_docs_dir, "states")
        os.makedirs(states_dir_path, exist_ok=True)

        for state_code in self._get_dataflow_pipeline_enabled_states():
            state_name = str(state_code.get_state())

            # Generate documentation
            documentation = self._get_state_information(state_code, state_name)

            # Write to markdown files
            states_markdown_path = os.path.join(
                states_dir_path,
                f"{self._normalize_string_for_path(state_name)}.md",
            )
            anything_modified |= persist_file_contents(documentation,
                                                       states_markdown_path)
        return anything_modified

    def _dependency_tree_formatter_for_gitbook(
        self,
        dag_key: DagKey,
        products_section: bool = False,
    ) -> str:
        """Gitbook-specific formatting for the generated dependency tree."""
        is_source_table = dag_key.dataset_id in OTHER_SOURCE_TABLE_DATASETS - {
            DATAFLOW_METRICS_DATASET
        }
        is_raw_data_table = dag_key.dataset_id in LATEST_VIEW_DATASETS
        is_metric = dag_key.dataset_id == DATAFLOW_METRICS_DATASET
        is_documented_view = not (is_source_table or is_raw_data_table
                                  or is_metric)
        if is_raw_data_table and (
                not dag_key.dataset_id.endswith("_raw_data_up_to_date_views")
                or not dag_key.table_id.endswith("_latest")):
            raise ValueError(
                f"Unexpected raw data view address: [{dag_key.dataset_id}.{dag_key.table_id}]"
            )

        staging_link = BQ_LINK_TEMPLATE.format(
            project="recidiviz-staging",
            dataset_id=dag_key.dataset_id,
            table_id=dag_key.table_id,
        )
        prod_link = BQ_LINK_TEMPLATE.format(
            project="recidiviz-123",
            dataset_id=dag_key.dataset_id,
            table_id=dag_key.table_id,
        )

        escaped_table_id = dag_key.table_id.replace(
            "__", ESCAPED_DOUBLE_UNDERSCORE)
        if is_documented_view or is_metric:
            # Include brackets so metrics and views render as markdown links.
            table_name_str = ("[" if products_section else
                              f"[{dag_key.dataset_id}.") + f"{escaped_table_id}]"
        else:
            table_name_str = ("" if products_section else
                              f"{dag_key.dataset_id}.") + escaped_table_id

        if is_source_table:
            table_name_str += (
                f" ([BQ Staging]({staging_link})) ([BQ Prod]({prod_link}))")
        elif is_metric:
            table_name_str += f"(../../metrics/{self.generic_types_by_metric_name[dag_key.table_id].lower()}/{dag_key.table_id}.md)"
        elif is_raw_data_table:
            table_name_str += RAW_DATA_LINKS_TEMPLATE.format(
                # Strip the 26-char "_raw_data_up_to_date_views" dataset suffix
                # (validated above) to recover the region code.
                region=dag_key.dataset_id[:-26],
                # Strip the 7-char "_latest" table suffix (validated above) to
                # recover the raw table name.
                raw_data_table=dag_key.table_id[:-7],
                staging_link=staging_link,
                prod_link=prod_link,
            )
        else:
            table_name_str += f"(../{'../views/' if products_section else ''}{dag_key.dataset_id}/{dag_key.table_id}.md)"

        return table_name_str + " <br/>"

    @staticmethod
    def _get_view_tree_string(
        node_family: BigQueryViewDagNodeFamily,
        dag_key: DagKey,
        descendants: bool = False,
    ) -> str:
        if (node_family.full_parentage and not descendants) or (
                node_family.full_descendants and descendants):
            # Gitbook has a line count limit of ~525 for the view markdowns, so we
            # only print trees if they are small enough. Otherwise, we direct readers
            # to a script that prints the tree to console.
            if descendants:
                if (node_family.child_dfs_tree_str.count("<br/>") <
                        MAX_DEPENDENCY_TREE_LENGTH):
                    return node_family.child_dfs_tree_str
            else:
                if (node_family.parent_dfs_tree_str.count("<br/>") <
                        MAX_DEPENDENCY_TREE_LENGTH):
                    return node_family.parent_dfs_tree_str
            return DEPENDENCY_TREE_SCRIPT_TEMPLATE.format(
                dataset_id=dag_key.dataset_id,
                table_id=dag_key.table_id,
                descendants=descendants,
            )
        return f"This view has no {'child' if descendants else 'parent'} dependencies."

    def _get_view_information(self, view_key: DagKey) -> str:
        """Returns string contents for a view markdown."""
        view_node = self.dag_walker.nodes_by_key[view_key]

        # Gitbook only supports italicizing single lines, so we need to ensure
        # multi-line descriptions format correctly.
        formatted_description = "_<br/>\n_".join([
            line.strip() for line in view_node.view.description.splitlines()
            if line.strip()
        ])
        staging_link = BQ_LINK_TEMPLATE.format(
            project="recidiviz-staging",
            dataset_id=view_key.dataset_id,
            table_id=view_key.table_id,
        )
        prod_link = BQ_LINK_TEMPLATE.format(
            project="recidiviz-123",
            dataset_id=view_key.dataset_id,
            table_id=view_key.table_id,
        )
        documentation = VIEW_DOCS_TEMPLATE.format(
            view_dataset_id=view_key.dataset_id,
            view_table_id=view_key.table_id,
            description=formatted_description,
            staging_link=staging_link,
            prod_link=prod_link,
            parent_tree=self._get_view_tree_string(view_node.node_family,
                                                   view_key),
            child_tree=self._get_view_tree_string(view_node.node_family,
                                                  view_key,
                                                  descendants=True),
        )
        return documentation

    def _get_all_views_to_document(self) -> Set[DagKey]:
        """Retrieve all DAG Walker views that we want to document"""
        all_nodes = self.dag_walker.nodes_by_key.values()
        all_view_keys = {
            DagKey(view_address=node.view.address)
            for node in all_nodes if node.dag_key.dataset_id not in
            DATASETS_TO_SKIP_VIEW_DOCUMENTATION
        }
        return all_view_keys

    def generate_view_markdowns(self) -> bool:
        """Generate markdown files for each view."""
        anything_modified = False
        views_dir_path = os.path.join(self.root_calc_docs_dir, "views")
        os.makedirs(views_dir_path, exist_ok=True)

        for view_key in self.all_views_to_document:
            # Generate documentation
            documentation = self._get_view_information(view_key)

            # Write to markdown files
            dataset_dir = os.path.join(
                views_dir_path,
                view_key.dataset_id,
            )
            os.makedirs(dataset_dir, exist_ok=True)

            view_markdown_path = os.path.join(
                dataset_dir,
                f"{view_key.table_id}.md",
            )
            anything_modified |= persist_file_contents(documentation,
                                                       view_markdown_path)
        return anything_modified

    def _get_metric_information(self, metric: Type[RecidivizMetric]) -> str:
        """Returns string contents for a metric markdown."""
        metric_table_id = DATAFLOW_METRICS_TO_TABLES[metric]
        metric_type = DATAFLOW_TABLES_TO_METRIC_TYPES[metric_table_id].value

        state_infos_list = sorted(
            self.state_metric_calculations_by_metric[metric_type],
            key=lambda info: (info.name, info.month_count),
        )
        headers = [
            "**State**",
            "**Number of Months Calculated**",
            "**Calculation Frequency**",
        ]
        table_matrix = [[
            f"[{state_info.name}](../../states/{self._normalize_string_for_path(state_info.name)}.md)",
            state_info.month_count if state_info.month_count else "N/A",
            state_info.frequency,
        ] for state_info in state_infos_list]
        writer = MarkdownTableWriter(headers=headers,
                                     value_matrix=table_matrix,
                                     margin=0)

        documentation = METRIC_DOCS_TEMPLATE.format(
            staging_link=BQ_LINK_TEMPLATE.format(
                project="recidiviz-staging",
                dataset_id="dataflow_metrics",
                table_id=metric_table_id,
            ),
            prod_link=BQ_LINK_TEMPLATE.format(
                project="recidiviz-123",
                dataset_id="dataflow_metrics",
                table_id=metric_table_id,
            ),
            metric_name=metric.__name__,
            description=metric.get_description(),
            metrics_cadence_table=writer.dumps(),
            metric_table_id=metric_table_id,
        )

        return documentation

    def generate_metric_markdowns(self) -> bool:
        """Generate markdown files for each metric."""
        anything_modified = False
        metrics_dir_path = os.path.join(self.root_calc_docs_dir, "metrics")
        os.makedirs(metrics_dir_path, exist_ok=True)

        for generic_type, class_list in sorted(
                self.metrics_by_generic_types.items()):
            generic_type_dir = os.path.join(
                metrics_dir_path,
                generic_type.lower(),
            )
            os.makedirs(generic_type_dir, exist_ok=True)

            for metric in class_list:
                # Generate documentation
                documentation = self._get_metric_information(metric)

                # Write to markdown files
                metric_markdown_path = os.path.join(
                    generic_type_dir,
                    f"{DATAFLOW_METRICS_TO_TABLES[metric]}.md",
                )
                anything_modified |= persist_file_contents(
                    documentation, metric_markdown_path)

        return anything_modified
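
    # A minimal usage sketch for this generator (hypothetical call site; the
    # real entry-point script is not shown in this file):
    #
    #   generator = CalculationDocumentationGenerator(
    #       products=products,  # assumed List[ProductConfig], loaded elsewhere
    #       root_calc_docs_dir="docs/calculation",
    #   )
    #   modified = (generator.generate_products_markdowns()
    #               | generator.generate_states_markdowns()
    #               | generator.generate_view_markdowns()
    #               | generator.generate_metric_markdowns())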
    def test_populate_node_family_descendants_dfs_tree_str(self) -> None:
        view_1 = BigQueryView(
            dataset_id="dataset_1",
            view_id="table_1",
            description="table_1 description",
            view_query_template="SELECT * FROM `{project_id}.source_dataset.source_table`",
        )
        view_2 = BigQueryView(
            dataset_id="dataset_2",
            view_id="table_2",
            description="table_2 description",
            view_query_template="SELECT * FROM `{project_id}.source_dataset.source_table_2`",
        )
        view_3 = BigQueryView(
            dataset_id="dataset_3",
            view_id="table_3",
            description="table_3 description",
            view_query_template="""
            SELECT * FROM `{project_id}.dataset_1.table_1`
            JOIN `{project_id}.dataset_2.table_2`
            USING (col)""",
        )
        view_4 = BigQueryView(
            dataset_id="dataset_4",
            view_id="table_4",
            description="table_4 description",
            view_query_template="""
            SELECT * FROM `{project_id}.dataset_3.table_3`""",
        )
        view_5 = BigQueryView(
            dataset_id="dataset_5",
            view_id="table_5",
            description="table_5 description",
            view_query_template="""
            SELECT * FROM `{project_id}.dataset_3.table_3`""",
        )
        view_6 = BigQueryView(
            dataset_id="dataset_6",
            view_id="table_6",
            description="table_6 description",
            view_query_template="""
            SELECT * FROM `{project_id}.dataset_5.table_5`""",
        )
        dag_walker = BigQueryViewDagWalker(
            [view_1, view_2, view_3, view_4, view_5, view_6]
        )

        # Top level view
        node = dag_walker.node_for_view(view_2)
        dag_walker.populate_node_family_for_node(node=node)
        expected_tree = """dataset_2.table_2
|--dataset_3.table_3
|----dataset_4.table_4
|----dataset_5.table_5
|------dataset_6.table_6
"""
        self.assertEqual(expected_tree, node.node_family.child_dfs_tree_str)

        # Descendants from middle of tree
        node = dag_walker.node_for_view(view_3)
        dag_walker.populate_node_family_for_node(node=node)
        expected_tree = """dataset_3.table_3
|--dataset_4.table_4
|--dataset_5.table_5
|----dataset_6.table_6
"""
        self.assertEqual(expected_tree, node.node_family.child_dfs_tree_str)

    def test_populate_node_family_parentage_dfs_tree_str(self) -> None:
        view_1 = BigQueryView(
            dataset_id="dataset_1",
            view_id="table_1",
            description="table_1 description",
            view_query_template="SELECT * FROM `{project_id}.source_dataset.source_table`",
        )
        view_2 = BigQueryView(
            dataset_id="dataset_2",
            view_id="table_2",
            description="table_2 description",
            view_query_template="SELECT * FROM `{project_id}.source_dataset.source_table_2`",
        )
        view_3 = BigQueryView(
            dataset_id="dataset_3",
            view_id="table_3",
            description="table_3 description",
            view_query_template="""
            SELECT * FROM `{project_id}.dataset_1.table_1`
            JOIN `{project_id}.dataset_2.table_2`
            USING (col)""",
        )
        view_4 = BigQueryView(
            dataset_id="dataset_4",
            view_id="table_4",
            description="table_4 description",
            view_query_template="""
            SELECT * FROM `{project_id}.dataset_3.table_3`""",
        )
        view_5 = BigQueryView(
            dataset_id="dataset_5",
            view_id="table_5",
            description="table_5 description",
            view_query_template="""
            SELECT * FROM `{project_id}.dataset_3.table_3`""",
        )
        dag_walker = BigQueryViewDagWalker([view_1, view_2, view_3, view_4, view_5])

        # Top level view
        node = dag_walker.node_for_view(view_5)
        dag_walker.populate_node_family_for_node(node=node)
        expected_tree = """dataset_5.table_5
|--dataset_3.table_3
|----dataset_2.table_2
|------source_dataset.source_table_2
|----dataset_1.table_1
|------source_dataset.source_table
"""
        self.assertEqual(expected_tree, node.node_family.parent_dfs_tree_str)

        # Middle of tree
        node = dag_walker.node_for_view(view_3)
        dag_walker.populate_node_family_for_node(node=node)
        expected_tree = """dataset_3.table_3
|--dataset_2.table_2
|----source_dataset.source_table_2
|--dataset_1.table_1
|----source_dataset.source_table
"""
        self.assertEqual(expected_tree, node.node_family.parent_dfs_tree_str)

        # Skip datasets
        dag_walker.populate_node_family_for_node(
            node=node,
            datasets_to_skip={"dataset_1"},
        )
        expected_tree = """dataset_3.table_3
|--dataset_2.table_2
|----source_dataset.source_table_2
|--source_dataset.source_table
"""
        self.assertEqual(expected_tree, node.node_family.parent_dfs_tree_str)

        # Custom formatted
        def _custom_formatter(dag_key: DagKey) -> str:
            return f"custom_formatted_{dag_key.dataset_id}_{dag_key.table_id}"

        dag_walker.populate_node_family_for_node(
            node=node, custom_node_formatter=_custom_formatter
        )

        expected_tree = """custom_formatted_dataset_3_table_3
|--custom_formatted_dataset_2_table_2
|----custom_formatted_source_dataset_source_table_2
|--custom_formatted_dataset_1_table_1
|----custom_formatted_source_dataset_source_table
"""
        self.assertEqual(expected_tree, node.node_family.parent_dfs_tree_str)

    def test_populate_node_family_full_parentage(self) -> None:
        dag_walker = BigQueryViewDagWalker(self.x_shaped_dag_views_list)

        # root node start
        start_node = dag_walker.node_for_view(self.x_shaped_dag_views_list[0])
        dag_walker.populate_node_family_for_node(
            node=start_node, view_source_table_datasets={"source_dataset"}
        )

        self.assertEqual(
            {
                DagKey(
                    view_address=BigQueryAddress(
                        dataset_id="source_dataset", table_id="source_table"
                    )
                )
            },
            start_node.node_family.full_parentage,
        )

        # start in middle
        start_node = dag_walker.node_for_view(self.x_shaped_dag_views_list[2])
        dag_walker.populate_node_family_for_node(
            node=start_node, view_source_table_datasets={"source_dataset"}
        )
        expected_parent_nodes = {
            DagKey(
                view_address=BigQueryAddress(
                    dataset_id="source_dataset", table_id="source_table"
                )
            ),
            DagKey(
                view_address=BigQueryAddress(
                    dataset_id="source_dataset", table_id="source_table_2"
                )
            ),
            DagKey.for_view(self.x_shaped_dag_views_list[0]),
            DagKey.for_view(self.x_shaped_dag_views_list[1]),
        }
        self.assertEqual(
            expected_parent_nodes,
            start_node.node_family.full_parentage,
        )

        # single start node
        start_node = dag_walker.node_for_view(self.x_shaped_dag_views_list[3])
        dag_walker.populate_node_family_for_node(
            node=start_node, view_source_table_datasets={"source_dataset"}
        )
        expected_parent_nodes = {
            DagKey(
                view_address=BigQueryAddress(
                    dataset_id="source_dataset", table_id="source_table"
                )
            ),
            DagKey(
                view_address=BigQueryAddress(
                    dataset_id="source_dataset", table_id="source_table_2"
                )
            ),
            DagKey.for_view(self.x_shaped_dag_views_list[0]),
            DagKey.for_view(self.x_shaped_dag_views_list[1]),
            DagKey.for_view(self.x_shaped_dag_views_list[2]),
        }
        self.assertEqual(
            expected_parent_nodes,
            start_node.node_family.full_parentage,
        )

        # multiple start nodes
        start_nodes = [
            start_node,
            dag_walker.node_for_view(self.x_shaped_dag_views_list[4]),
        ]

        parentage_set: Set[DagKey] = set()
        for node in start_nodes:
            dag_walker.populate_node_family_for_node(
                node=node, view_source_table_datasets={"source_dataset"}
            )
            parentage_set = parentage_set.union(node.node_family.full_parentage)

        self.assertEqual(expected_parent_nodes, parentage_set)