# NOTE: import block reconstructed for context; the exact module paths inside
# usaspending-api are assumptions, and mock_execute_sql /
# _process_es_etl_test_config are assumed to be helpers defined alongside
# these tests.
from datetime import datetime, timezone

from usaspending_api.awards.models import Award
from usaspending_api.etl.elasticsearch_loader_helpers import (
    Controller,
    ensure_view_exists,
    set_final_index_config,
)


def test_create_and_load_new_award_index(award_data_fixture,
                                         elasticsearch_award_index,
                                         monkeypatch):
    """Test the ``elasticsearch_loader`` django management command to create a new awards index and load it
    with data from the DB
    """
    client = elasticsearch_award_index.client  # type: Elasticsearch

    # Ensure index is not yet created
    assert not client.indices.exists(elasticsearch_award_index.index_name)
    original_db_awards_count = Award.objects.count()

    # Inject ETL arg into config for this run, which loads a newly created index
    elasticsearch_award_index.etl_config["create_new_index"] = True
    es_etl_config = _process_es_etl_test_config(client,
                                                elasticsearch_award_index)

    # Must use mock sql function to share test DB conn+transaction in ETL code
    # Patching on the module into which it is imported, not the module where it is defined
    monkeypatch.setattr(
        "usaspending_api.etl.elasticsearch_loader_helpers.extract_data.execute_sql_statement",
        mock_execute_sql)
    # Also override SQL function listed in config object with the mock one
    es_etl_config["execute_sql_func"] = mock_execute_sql
    loader = Controller(es_etl_config)
    assert loader.__class__.__name__ == "Controller"
    loader.prepare_for_etl()
    loader.dispatch_tasks()
    # Along with other things, this will refresh the index, to surface loaded docs
    set_final_index_config(client, elasticsearch_award_index.index_name)

    assert client.indices.exists(elasticsearch_award_index.index_name)
    es_award_docs = client.count(
        index=elasticsearch_award_index.index_name)["count"]
    assert es_award_docs == original_db_awards_count
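
A standalone sketch of the patching rule noted in the comments above: ``from x import f`` copies the binding into the importer's namespace, so the mock must replace the importer's copy of the name. The ``db``/``etl`` modules below are fabricated with ``types.ModuleType`` purely for illustration and are not part of usaspending-api.

import sys
import types


def test_patch_at_import_site(monkeypatch):
    # Stand-in for the module that *defines* the function
    db = types.ModuleType("db")
    db.execute_sql_statement = lambda sql: "real rows"
    monkeypatch.setitem(sys.modules, "db", db)

    # Stand-in for the module that *imports* it; `from db import
    # execute_sql_statement` copies the binding exactly like this:
    etl = types.ModuleType("etl")
    etl.execute_sql_statement = db.execute_sql_statement
    etl.run = lambda sql: etl.execute_sql_statement(sql)
    monkeypatch.setitem(sys.modules, "etl", etl)

    # Patching the defining module is invisible to the importer's copy...
    monkeypatch.setattr("db.execute_sql_statement", lambda sql: "mock rows")
    assert etl.run("SELECT 1") == "real rows"

    # ...patching at the import site is what these tests rely on.
    monkeypatch.setattr("etl.execute_sql_statement", lambda sql: "mock rows")
    assert etl.run("SELECT 1") == "mock rows"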


def test_incremental_load_into_award_index(award_data_fixture,
                                           elasticsearch_award_index,
                                           monkeypatch):
    """Test the ``elasticsearch_loader`` Django management command to incrementally load updated data into the awards ES
    index from the DB, overwriting the doc that was already there
    """
    original_db_awards_count = Award.objects.count()
    elasticsearch_award_index.update_index()
    client = elasticsearch_award_index.client  # type: Elasticsearch
    assert client.indices.exists(elasticsearch_award_index.index_name)
    es_award_docs = client.count(
        index=elasticsearch_award_index.index_name)["count"]
    assert es_award_docs == original_db_awards_count

    # Inject ETL arg into config for this run, to suppress processing deletes. Test incremental load only
    elasticsearch_award_index.etl_config["process_deletes"] = False
    elasticsearch_award_index.etl_config["start_datetime"] = datetime.now(
        timezone.utc)
    es_etl_config = _process_es_etl_test_config(client,
                                                elasticsearch_award_index)

    # Now modify one of the DB objects
    awd = Award.objects.first()  # type: Award
    awd.total_obligation = 9999
    awd.save()

    # Must use mock sql function to share test DB conn+transaction in ETL code
    # Patching on the module into which it is imported, not the module where it is defined
    monkeypatch.setattr(
        "usaspending_api.etl.elasticsearch_loader_helpers.extract_data.execute_sql_statement",
        mock_execute_sql)
    # Also override SQL function listed in config object with the mock one
    es_etl_config["execute_sql_func"] = mock_execute_sql
    ensure_view_exists(es_etl_config["sql_view"], force=True)
    loader = Controller(es_etl_config)
    assert loader.__class__.__name__ == "Controller"
    loader.prepare_for_etl()
    loader.dispatch_tasks()
    client.indices.refresh(elasticsearch_award_index.index_name)

    assert client.indices.exists(elasticsearch_award_index.index_name)
    es_award_docs = client.count(
        index=elasticsearch_award_index.index_name)["count"]
    assert es_award_docs == original_db_awards_count
    es_awards = client.search(index=elasticsearch_award_index.index_name)
    updated_award = [
        a for a in es_awards["hits"]["hits"]
        if a["_source"]["award_id"] == awd.id
    ][0]
    assert int(updated_award["_source"]["total_obligation"]) == 9999
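
The ``mock_execute_sql`` helper passed into the config above is not shown in this example; the sketch below is a plausible shape for it, assuming the real helper returns rows as dicts. The point is that it executes the ETL's SQL on Django's default (test) connection, so the queries see rows created inside pytest's transaction instead of opening a separate connection that cannot.

from django.db import connection


def mock_execute_sql(sql, results=True, verbose=False):  # signature assumed
    """Run ETL SQL statements on the shared pytest test-DB connection."""
    with connection.cursor() as cursor:
        cursor.execute(sql)
        if not results:
            return None
        columns = [col[0] for col in cursor.description]
        # Rows as dicts, mirroring what the extract step is assumed to expect
        return [dict(zip(columns, row)) for row in cursor.fetchall()]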

Example #3
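
This excerpt is the ``handle()`` method of the command's ``Command(BaseCommand)`` class; the surrounding class definition and imports are omitted.
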
    def handle(self, *args, **options):
        elasticsearch_client = instantiate_elasticsearch_client()
        config = parse_cli_args(options, elasticsearch_client)

        start = perf_counter()
        logger.info(format_log(f"Starting script\n{'=' * 56}"))
        start_msg = "target index: {index_name} | Starting from: {starting_date}"
        logger.info(format_log(start_msg.format(**config)))

        ensure_view_exists(config["sql_view"], force=True)
        error_addition = ""
        loader = Controller(config)

        if config["is_incremental_load"]:
            toggle_refresh_off(elasticsearch_client,
                               config["index_name"])  # Turned back on at end.

        try:
            if config["process_deletes"]:
                loader.run_deletes()

            if not config["deletes_only"]:
                loader.prepare_for_etl()
                loader.dispatch_tasks()
        except Exception as e:
            logger.error(f"{str(e)}")
            error_addition = "before encountering a problem during execution.... "
            raise SystemExit(1)
        else:
            loader.complete_process()
            if config["drop_db_view"]:
                logger.info(
                    format_log(f"Dropping SQL view '{config['sql_view']}'"))
                drop_etl_view(config["sql_view"], True)
        finally:
            msg = f"Script duration was {perf_counter() - start:.2f}s {error_addition}|"
            headers = f"{'-' * (len(msg) - 2)} |"
            logger.info(format_log(headers))
            logger.info(format_log(msg))
            logger.info(format_log(headers))

        # Used to help pipeline determine when job passed but needs attention
        if config["raise_status_code_3"]:
            raise SystemExit(3)
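
For illustration, this is one way a calling pipeline might consume the exit codes raised above; the command name is taken from the docstrings in the earlier examples, and real invocations would append whatever CLI flags ``parse_cli_args`` defines (omitted here).

import subprocess
import sys

result = subprocess.run([sys.executable, "manage.py", "elasticsearch_loader"])
if result.returncode == 0:
    print("ES load succeeded")
elif result.returncode == 3:
    # The raise_status_code_3 path: the job passed but needs attention
    print("ES load passed with warnings; review the logs")
else:
    raise SystemExit(f"ES load failed with exit code {result.returncode}")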