def test_non_culled_host_is_not_removed(
    event_producer_mock, event_datetime_mock, db_create_host, db_get_hosts, inventory_config
):
    """The reaper must leave stale-warning, stale and fresh hosts untouched."""
    timestamps = get_staleness_timestamps()

    # One host in each non-culled staleness state.
    created = [
        db_create_host(minimal_db_host(stale_timestamp=ts.isoformat(), reporter="some reporter"))
        for ts in (timestamps["stale_warning"], timestamps["stale"], timestamps["fresh"])
    ]
    host_ids = [host.id for host in created]

    # Sanity check: all three hosts are present before the run.
    assert host_ids == [host.id for host in db_get_hosts(host_ids)]

    threadctx.request_id = UNKNOWN_REQUEST_ID_VALUE
    host_reaper_run(
        inventory_config,
        mock.Mock(),
        db.session,
        event_producer_mock,
        shutdown_handler=mock.Mock(**{"shut_down.return_value": False}),
    )

    # All hosts survive and no delete event was produced.
    assert host_ids == [host.id for host in db_get_hosts(host_ids)]
    assert event_producer_mock.event is None
def test_reaper_shutdown_handler(event_datetime_mock, db_create_host, db_get_hosts, inventory_config):
    """A shutdown signal after the first chunk stops the reaper mid-run."""
    timestamps = get_staleness_timestamps()
    total_hosts = 3

    host_ids = []
    for _ in range(total_hosts):
        new_host = db_create_host(
            minimal_db_host(stale_timestamp=timestamps["culled"].isoformat(), reporter="some reporter")
        )
        host_ids.append(new_host.id)

    assert db_get_hosts(host_ids).count() == total_hosts

    producer = mock.Mock()
    threadctx.request_id = UNKNOWN_REQUEST_ID_VALUE
    # shut_down returns False once, then True, so the reaper stops after two deletions.
    host_reaper_run(
        inventory_config,
        mock.Mock(),
        db.session,
        producer,
        shutdown_handler=mock.Mock(**{"shut_down.side_effect": (False, True)}),
    )

    # Two hosts were reaped before the shutdown; one remains.
    assert db_get_hosts(host_ids).count() == 1
    assert producer.write_event.call_count == 2
def test_synchronize_host_event(event_producer_mock, event_datetime_mock, db_create_host, db_get_host, inventory_config):
    """The synchronizer emits an update event for a culled host without removing it."""
    timestamps = get_staleness_timestamps()
    culled_host = db_create_host(
        minimal_db_host(stale_timestamp=timestamps["culled"].isoformat(), reporter="some reporter")
    )
    assert db_get_host(culled_host.id)

    threadctx.request_id = UNKNOWN_REQUEST_ID_VALUE
    host_synchronizer_run(
        inventory_config,
        mock.Mock(),
        db.session,
        event_producer_mock,
        shutdown_handler=mock.Mock(**{"shut_down.return_value": False}),
    )

    # The host must still exist, since the synchronizer only produces an update event.
    assert db_get_host(culled_host.id)
    assert_synchronize_event_is_valid(
        event_producer=event_producer_mock, host=culled_host, timestamp=event_datetime_mock
    )
def test_culled_host_is_removed(event_producer_mock, event_datetime_mock, db_create_host, db_get_host, inventory_config):
    """A host past its cull deadline is deleted and a delete event is emitted."""
    timestamps = get_staleness_timestamps()
    culled_host = db_create_host(
        minimal_db_host(stale_timestamp=timestamps["culled"].isoformat(), reporter="some reporter")
    )
    assert db_get_host(culled_host.id)

    threadctx.request_id = UNKNOWN_REQUEST_ID_VALUE
    host_reaper_run(
        inventory_config,
        mock.Mock(),
        db.session,
        event_producer_mock,
        shutdown_handler=mock.Mock(**{"shut_down.return_value": False}),
    )

    # The host is gone and a valid delete event was produced for it.
    assert not db_get_host(culled_host.id)
    assert_delete_event_is_valid(event_producer=event_producer_mock, host=culled_host, timestamp=event_datetime_mock)
def test_reaper_stops_after_kafka_producer_error(
    send_side_effects, event_producer, db_create_multiple_hosts, db_get_hosts, inventory_config, mocker
):
    """A Kafka send failure aborts the reaper run, leaving the remaining hosts in place."""
    mocker.patch("lib.host_delete.kafka_available")
    event_producer._kafka_producer.send.side_effect = send_side_effects

    timestamps = get_staleness_timestamps()
    total_hosts = 3
    culled_hosts = db_create_multiple_hosts(
        how_many=total_hosts, extra_data={"stale_timestamp": timestamps["culled"]}
    )
    host_ids = [str(host.id) for host in culled_hosts]
    assert db_get_hosts(host_ids).count() == total_hosts

    threadctx.request_id = UNKNOWN_REQUEST_ID_VALUE
    # The second send raises, which must propagate out of the reaper.
    with pytest.raises(KafkaError):
        host_reaper_run(
            inventory_config,
            mock.Mock(),
            db.session,
            event_producer,
            shutdown_handler=mock.Mock(**{"shut_down.return_value": False}),
        )

    # One host was deleted before the error; the other two survive.
    assert db_get_hosts(host_ids).count() == 2
    assert event_producer._kafka_producer.send.call_count == 2
def mq_create_hosts_in_all_states(mq_create_or_update_host):
    """Create one host per staleness state via MQ; return a state -> created-host mapping."""
    return {
        state: mq_create_or_update_host(
            minimal_host(
                insights_id=generate_uuid(),
                stale_timestamp=timestamp.isoformat(),
                reporter="some reporter",
                facts=FACTS,
            )
        )
        for state, timestamp in get_staleness_timestamps().items()
    }
def test_replace_facts_on_multiple_culled_hosts(db_create_multiple_hosts, db_get_hosts, api_put):
    """Replacing facts on hosts already marked as culled must be rejected with HTTP 400."""
    timestamps = get_staleness_timestamps()
    culled_hosts = db_create_multiple_hosts(
        how_many=2, extra_data={"facts": DB_FACTS, "stale_timestamp": timestamps["culled"]}
    )
    facts_url = build_facts_url(host_list_or_id=culled_hosts, namespace=DB_FACTS_NAMESPACE)

    # Try to replace the facts on hosts that have been marked as culled.
    status, _ = api_put(facts_url, DB_NEW_FACTS)
    assert_response_status(status, expected_status=400)
def test_delete_duplicates_customer_scenario_2(event_producer, db_create_host, db_get_host, inventory_config):
    """Three hosts sharing the same identity facts are deduplicated down to the newest.

    Hosts 1-3 share insights_id/subscription_manager_id/etc. and differ only in
    IP addresses; host3 is fresh while the others are stale-warning. The
    delete-duplicates job must remove hosts 1 and 2 and keep host3.
    """
    staleness_timestamps = get_staleness_timestamps()
    rhsm_id = generate_uuid()
    bios_uuid = generate_uuid()
    canonical_facts = {
        "insights_id": generate_uuid(),
        "subscription_manager_id": rhsm_id,
        "bios_uuid": bios_uuid,
        "satellite_id": rhsm_id,
        "fqdn": "rozrhjrad01.base.srvco.net",
        "ip_addresses": ["10.230.230.10", "10.230.230.13"],
        "mac_addresses": ["00:50:56:ac:56:45", "00:50:56:ac:48:61", "00:00:00:00:00:00"],
    }
    host_data = {
        "stale_timestamp": staleness_timestamps["stale_warning"],
        "reporter": "puptoo",
        "canonical_facts": canonical_facts,
    }
    host1 = minimal_db_host(**host_data)
    created_host1 = db_create_host(host=host1)
    # NOTE: host_data["canonical_facts"] is mutated in place between creates,
    # so creation order is load-bearing here.
    host_data["canonical_facts"]["ip_addresses"] = ["10.230.230.3", "10.230.230.4"]
    host2 = minimal_db_host(**host_data)
    created_host2 = db_create_host(host=host2)
    # The third duplicate is fresh — it ends up as the surviving host.
    host_data["canonical_facts"]["ip_addresses"] = ["10.230.230.1", "10.230.230.4"]
    host_data["stale_timestamp"] = staleness_timestamps["fresh"]
    host3 = minimal_db_host(**host_data)
    created_host3 = db_create_host(host=host3)
    assert db_get_host(created_host1.id)
    assert db_get_host(created_host2.id)
    assert db_get_host(created_host3.id)
    # The delete-duplicates job uses three independent DB sessions.
    Session = _init_db(inventory_config)
    sessions = [Session() for _ in range(3)]
    with multi_session_guard(sessions):
        deleted_hosts_count = host_delete_duplicates_run(
            inventory_config,
            mock.Mock(),
            *sessions,
            event_producer,
            shutdown_handler=mock.Mock(**{"shut_down.return_value": False}),
        )
    # Two duplicates removed; only the fresh host remains.
    assert deleted_hosts_count == 2
    assert not db_get_host(created_host1.id)
    assert not db_get_host(created_host2.id)
    assert db_get_host(created_host3.id)
def test_culled_edge_host_is_not_removed(event_producer_mock, db_create_host, db_get_host, inventory_config):
    """An edge host past its cull deadline survives a reaper run."""
    timestamps = get_staleness_timestamps()
    edge_host = db_create_host(
        host=minimal_db_host(
            stale_timestamp=timestamps["culled"],
            reporter="some reporter",
            system_profile_facts={"host_type": "edge"},
        )
    )
    assert db_get_host(edge_host.id)

    threadctx.request_id = UNKNOWN_REQUEST_ID_VALUE
    host_reaper_run(
        inventory_config,
        mock.Mock(),
        db.session,
        event_producer_mock,
        shutdown_handler=mock.Mock(**{"shut_down.return_value": False}),
    )

    # The edge host is still present after the run.
    assert db_get_host(edge_host.id)
def test_delete_duplicates_multiple_scenarios(
    event_producer, db_create_host, db_create_multiple_hosts, db_get_host, inventory_config, script_function
):
    """Exercise the delete-duplicates job across four host populations at once.

    Populations: a customer-reported duplicate trio, hosts matching on an
    elevated ID, hosts with non-matching elevated IDs, and hosts with/without
    matching plain canonical facts. Filler hosts (``chunk_size`` each) are
    interleaved so the script must process more than one chunk. Only true
    duplicates may be deleted; the last-created host of each duplicate group
    survives.
    """
    chunk_size = inventory_config.script_chunk_size

    # --- Customer scenario: three hosts sharing elevated identity facts ---
    staleness_timestamps = get_staleness_timestamps()
    rhsm_id = generate_uuid()
    bios_uuid = generate_uuid()
    canonical_facts = {
        "insights_id": generate_uuid(),
        "subscription_manager_id": rhsm_id,
        "bios_uuid": bios_uuid,
        "satellite_id": rhsm_id,
        "fqdn": "rozrhjrad01.base.srvco.net",
        "ip_addresses": ["10.230.230.10", "10.230.230.13"],
        "mac_addresses": ["00:50:56:ac:56:45", "00:50:56:ac:48:61", "00:00:00:00:00:00"],
    }
    host_data = {
        "stale_timestamp": staleness_timestamps["stale_warning"],
        "reporter": "puptoo",
        "canonical_facts": canonical_facts,
    }
    customer_host1 = minimal_db_host(**host_data)
    customer_created_host1 = db_create_host(host=customer_host1).id
    # host_data["canonical_facts"] is mutated in place — creation order matters.
    host_data["canonical_facts"]["ip_addresses"] = ["10.230.230.3", "10.230.230.4"]
    customer_host2 = minimal_db_host(**host_data)
    customer_created_host2 = db_create_host(host=customer_host2).id
    host_data["canonical_facts"]["ip_addresses"] = ["10.230.230.1", "10.230.230.4"]
    host_data["stale_timestamp"] = staleness_timestamps["fresh"]
    customer_host3 = minimal_db_host(**host_data)
    customer_created_host3 = db_create_host(host=customer_host3).id
    assert db_get_host(customer_created_host1)
    assert db_get_host(customer_created_host2)
    assert db_get_host(customer_created_host3)

    # --- Matching elevated ID: all hosts share one insights_id ---
    def _gen_canonical_facts():
        # Fresh, unique canonical facts for one host.
        return {
            "insights_id": generate_uuid(),
            "subscription_manager_id": generate_uuid(),
            "bios_uuid": generate_uuid(),
            "satellite_id": generate_uuid(),
            "fqdn": generate_random_string(),
        }

    elevated_matching_host_count = 10
    elevated_id = generate_uuid()
    elevated_matching_created_hosts = []
    # Hosts with the same amount of canonical facts
    for _ in range(elevated_matching_host_count):
        canonical_facts = _gen_canonical_facts()
        canonical_facts["insights_id"] = elevated_id
        host = minimal_db_host(canonical_facts=canonical_facts)
        elevated_matching_created_hosts.append(db_create_host(host=host).id)
    # Hosts with less canonical facts
    for _ in range(elevated_matching_host_count):
        canonical_facts = {"insights_id": elevated_id}
        host = minimal_db_host(canonical_facts=canonical_facts)
        elevated_matching_created_hosts.append(db_create_host(host=host).id)
    # Create a lot of hosts to test that the script deletes duplicates in multiple chunks
    db_create_multiple_hosts(how_many=chunk_size)
    # Hosts with more canonical facts
    for _ in range(elevated_matching_host_count):
        canonical_facts = _gen_canonical_facts()
        canonical_facts["insights_id"] = elevated_id
        canonical_facts["ip_addresses"] = [f"10.0.0.{randint(1, 255)}"]
        host = minimal_db_host(canonical_facts=canonical_facts)
        elevated_matching_created_hosts.append(db_create_host(host=host).id)
    for host in elevated_matching_created_hosts:
        assert db_get_host(host)

    # --- Elevated IDs not matching: unique insights_id per host ---
    elevated_not_matching_canonical_facts = _gen_canonical_facts()
    elevated_not_matching_host_count = 10
    elevated_not_matching_created_hosts = []
    # Hosts with the same amount of canonical facts
    for _ in range(elevated_not_matching_host_count):
        elevated_not_matching_canonical_facts["insights_id"] = generate_uuid()
        host = minimal_db_host(canonical_facts=elevated_not_matching_canonical_facts)
        elevated_not_matching_created_hosts.append(db_create_host(host=host).id)
    # Hosts with less canonical facts
    for _ in range(elevated_not_matching_host_count):
        facts = {"insights_id": generate_uuid()}
        host = minimal_db_host(canonical_facts=facts)
        elevated_not_matching_created_hosts.append(db_create_host(host=host).id)
    # Hosts with more canonical facts
    for _ in range(elevated_not_matching_host_count):
        elevated_not_matching_canonical_facts["insights_id"] = generate_uuid()
        elevated_not_matching_canonical_facts["ip_addresses"] = ["10.0.0.10"]
        host = minimal_db_host(canonical_facts=elevated_not_matching_canonical_facts)
        elevated_not_matching_created_hosts.append(db_create_host(host=host).id)
    for host in elevated_not_matching_created_hosts:
        assert db_get_host(host)

    # --- Without elevated IDs - canonical facts matching ---
    without_elevated_matching_canonical_facts = {
        "bios_uuid": generate_uuid(),
        "satellite_id": generate_uuid(),
        "fqdn": generate_random_string(),
        "ip_addresses": ["10.0.0.1"],
        "mac_addresses": ["aa:bb:cc:dd:ee:ff"],
    }
    without_elevated_matching_host_count = 10
    without_elevated_matching_created_hosts = []
    # Hosts with less canonical facts: one host per single fact.
    for fact in without_elevated_matching_canonical_facts:
        facts = {fact: without_elevated_matching_canonical_facts[fact]}
        host = minimal_db_host(canonical_facts=facts)
        without_elevated_matching_created_hosts.append(db_create_host(host=host).id)
    # Create a lot of hosts to test that the script deletes duplicates in multiple chunks
    db_create_multiple_hosts(how_many=chunk_size)
    # Hosts with the same amount of canonical facts
    for _ in range(without_elevated_matching_host_count):
        host = minimal_db_host(canonical_facts=without_elevated_matching_canonical_facts)
        without_elevated_matching_created_hosts.append(db_create_host(host=host).id)
    for host in without_elevated_matching_created_hosts:
        assert db_get_host(host)

    # --- Without elevated IDs - canonical facts not matching ---
    without_elevated_not_matching_canonical_facts = {
        "bios_uuid": generate_uuid(),
        "satellite_id": generate_uuid(),
        "fqdn": generate_random_string(),
        "ip_addresses": ["0.0.0.0"],
        "mac_addresses": ["aa:bb:cc:dd:ee:ff"],
    }
    without_elevated_not_matching_host_count = 10
    without_elevated_not_matching_created_hosts = []
    # Hosts with the same amount of canonical facts (unique fqdn each).
    for _ in range(without_elevated_not_matching_host_count):
        facts = deepcopy(without_elevated_not_matching_canonical_facts)
        facts["fqdn"] = generate_random_string()
        host = minimal_db_host(canonical_facts=facts)
        without_elevated_not_matching_created_hosts.append(db_create_host(host=host).id)
    # Hosts with less canonical facts
    for _ in range(without_elevated_not_matching_host_count):
        facts = {"fqdn": generate_random_string()}
        host = minimal_db_host(canonical_facts=facts)
        without_elevated_not_matching_created_hosts.append(db_create_host(host=host).id)
    # Hosts with more canonical facts: add one elevated ID each.
    for fact in ELEVATED_IDS:
        facts = deepcopy(without_elevated_not_matching_canonical_facts)
        facts["fqdn"] = generate_random_string()
        facts[fact] = generate_uuid()
        if fact == "provider_id":
            # provider_id is only valid together with a provider_type.
            facts["provider_type"] = "aws"
        host = minimal_db_host(canonical_facts=facts)
        without_elevated_not_matching_created_hosts.append(db_create_host(host=host).id)
    for host in without_elevated_not_matching_created_hosts:
        assert db_get_host(host)

    # Run either the library entry point or the script main, per parametrization.
    if script_function == "run":
        Session = _init_db(inventory_config)
        sessions = [Session() for _ in range(3)]
        with multi_session_guard(sessions):
            deleted_hosts_count = host_delete_duplicates_run(
                inventory_config,
                mock.Mock(),
                *sessions,
                event_producer,
                shutdown_handler=mock.Mock(**{"shut_down.return_value": False}),
            )
        assert deleted_hosts_count == elevated_matching_host_count * 3 + without_elevated_matching_host_count + len(
            without_elevated_matching_canonical_facts
        )
    else:
        host_delete_duplicates_main(mock.Mock())

    # Customer trio: only the fresh third host survives.
    assert not db_get_host(customer_created_host1)
    assert not db_get_host(customer_created_host2)
    assert db_get_host(customer_created_host3)
    # Elevated-ID-matching group: only the last-created host survives.
    for i in range(len(elevated_matching_created_hosts) - 1):
        assert not db_get_host(elevated_matching_created_hosts[i])
    assert db_get_host(elevated_matching_created_hosts[-1])
    # Non-matching elevated IDs: nothing deleted.
    for host in elevated_not_matching_created_hosts:
        assert db_get_host(host)
    # Plain-facts-matching group: only the last-created host survives.
    for i in range(len(without_elevated_matching_created_hosts) - 1):
        assert not db_get_host(without_elevated_matching_created_hosts[i])
    assert db_get_host(without_elevated_matching_created_hosts[-1])
    # Non-matching plain facts: nothing deleted.
    for host in without_elevated_not_matching_created_hosts:
        assert db_get_host(host)
def test_delete_duplicates_customer_scenario_1(event_producer, db_create_host, db_get_host, inventory_config):
    """Five progressively-reported duplicates collapse to the most recent host.

    Reproduces a customer report: the same machine checked in five times with
    varying facts (IPs, MACs, fqdn, reporter, presence of bios_uuid). The
    delete-duplicates job must delete hosts 1-4 and keep host5.
    """
    staleness_timestamps = get_staleness_timestamps()
    rhsm_id = generate_uuid()
    bios_uuid = generate_uuid()
    canonical_facts = {
        "insights_id": generate_uuid(),
        "subscription_manager_id": rhsm_id,
        "bios_uuid": bios_uuid,
        "satellite_id": rhsm_id,
        "fqdn": "rn001018",
        "ip_addresses": ["10.230.230.3"],
        "mac_addresses": ["00:50:56:ab:5a:22", "00:00:00:00:00:00"],
    }
    host_data = {
        "stale_timestamp": staleness_timestamps["stale_warning"],
        "reporter": "puptoo",
        "canonical_facts": canonical_facts,
    }
    host1 = minimal_db_host(**host_data)
    created_host1 = db_create_host(host=host1)
    # NOTE: host_data and its nested canonical_facts are mutated in place
    # between creates, so each host builds on the previous one's facts.
    host_data["canonical_facts"]["ip_addresses"] = ["10.230.230.30"]
    host_data["canonical_facts"].pop("bios_uuid")
    host_data["stale_timestamp"] = staleness_timestamps["stale"]
    host2 = minimal_db_host(**host_data)
    created_host2 = db_create_host(host=host2)
    host_data["canonical_facts"]["ip_addresses"] = ["10.230.230.3"]
    host3 = minimal_db_host(**host_data)
    created_host3 = db_create_host(host=host3)
    # Fourth check-in arrives via a different reporter with restored bios_uuid
    # and a fully-qualified fqdn.
    host_data["reporter"] = "yupana"
    host_data["canonical_facts"]["ip_addresses"] = ["10.230.230.1"]
    host_data["canonical_facts"]["mac_addresses"] = ["00:50:56:ab:5a:22"]
    host_data["canonical_facts"]["bios_uuid"] = bios_uuid
    host_data["canonical_facts"]["fqdn"] = "rn001018.bcbst.com"
    host_data["stale_timestamp"] = staleness_timestamps["fresh"]
    host4 = minimal_db_host(**host_data)
    created_host4 = db_create_host(host=host4)
    # Fifth (latest) check-in reverts to the short fqdn and drops bios_uuid.
    host_data["reporter"] = "puptoo"
    host_data["canonical_facts"]["ip_addresses"] = ["10.230.230.15"]
    host_data["canonical_facts"]["mac_addresses"] = ["00:50:56:ab:5a:22", "00:00:00:00:00:00"]
    host_data["canonical_facts"].pop("bios_uuid")
    host_data["canonical_facts"]["fqdn"] = "rn001018"
    host5 = minimal_db_host(**host_data)
    created_host5 = db_create_host(host=host5)
    assert db_get_host(created_host1.id)
    assert db_get_host(created_host2.id)
    assert db_get_host(created_host3.id)
    assert db_get_host(created_host4.id)
    assert db_get_host(created_host5.id)
    # The delete-duplicates job uses three independent DB sessions.
    Session = _init_db(inventory_config)
    sessions = [Session() for _ in range(3)]
    with multi_session_guard(sessions):
        deleted_hosts_count = host_delete_duplicates_run(
            inventory_config,
            mock.Mock(),
            *sessions,
            event_producer,
            shutdown_handler=mock.Mock(**{"shut_down.return_value": False}),
        )
    # Four duplicates removed; only the newest check-in remains.
    assert deleted_hosts_count == 4
    assert not db_get_host(created_host1.id)
    assert not db_get_host(created_host2.id)
    assert not db_get_host(created_host3.id)
    assert not db_get_host(created_host4.id)
    assert db_get_host(created_host5.id)