def test_report_power_state_reports_all_exceptions(self):
    """A failing power query is reported to the region, logged to
    maaslog, and the query exception is ultimately re-raised."""
    twisted_log = self.useFixture(TwistedLoggerFixture())
    maaslog = self.useFixture(FakeLogger("maas"))

    # Avoid threads here.
    self.patch(power, "deferToThread", maybeDeferred)

    exception_type = factory.make_exception_type()
    exception_message = factory.make_string()
    exception = exception_type(exception_message)

    # Pretend the query always fails with `exception`.
    query = self.patch_autospec(power, self.func)
    query.side_effect = always_fail_with(exception)

    # Intercept calls to power_state_update() and send_node_event().
    power_state_update = self.patch_autospec(power, "power_state_update")
    power_state_update.return_value = succeed(None)
    send_node_event = self.patch_autospec(power, "send_node_event")
    send_node_event.return_value = succeed(None)

    self.patch(
        self.power_driver, "detect_missing_packages").return_value = []

    system_id = factory.make_name('system_id')
    hostname = factory.make_name('hostname')
    context = sentinel.context
    clock = Clock()

    d = power.get_power_state(
        system_id, hostname, self.power_type, context, clock)
    d = power.report_power_state(d, system_id, hostname)

    # Crank through some number of retries.
    for wait in self.waits:
        self.assertFalse(d.called)
        clock.advance(wait)
    self.assertTrue(d.called)

    # Finally the exception from the query is raised.
    self.assertRaises(exception_type, extract_result, d)

    # The broken power query function patched earlier was called the
    # same number of times as there are steps in the default waiting
    # policy.
    expected_call = call(system_id, hostname, self.power_type, context)
    self.assertThat(
        query, MockCallsMatch(*([expected_call] * self.calls)))

    expected_message = (
        "%s: Power state could not be queried: %s"
        % (hostname, exception_message))

    # An attempt was made to report the failure to the region.
    self.assertThat(
        power_state_update, MockCalledOnceWith(system_id, 'error'))
    # An attempt was made to log a node event with details.
    self.assertThat(
        send_node_event,
        MockCalledOnceWith(
            EVENT_TYPES.NODE_POWER_QUERY_FAILED,
            system_id, hostname, exception_message))

    # Nothing was logged to the Twisted log.
    self.assertEqual("", twisted_log.output)
    # A brief message is written to maaslog.
    self.assertEqual(expected_message + "\n", maaslog.output)
def test_startProcessing_calls_start_when_looping_call_not_running(self):
    """startProcessing() kicks off the processing LoopingCall."""
    service = self.make_service(sentinel.listener)
    start = self.patch(service.processing, "start")
    service.startProcessing()
    # The loop runs every 0.1s and is not started immediately.
    self.assertThat(start, MockCalledOnceWith(0.1, now=False))
def assertScriptsMatch(self, *matchers):
    """Assert parallel.test() ran once with a suite matching *matchers*."""
    self.assertThat(parallel.test, MockCalledOnceWith(ANY, ANY, ANY))
    # Only the suite (first positional argument) is inspected further.
    suite = parallel.test.call_args[0][0]
    self.assertThat(
        suite, AfterPreprocessing(list, MatchesSetwise(*matchers)))
def test__executes_rndc_command(self):
    """bind_reload() issues an rndc 'reload' command."""
    self.patch_autospec(actions, "execute_rndc_command")
    actions.bind_reload()
    self.assertThat(
        actions.execute_rndc_command, MockCalledOnceWith(("reload",)))
def test_sets_self_as_process_group_leader(self):
    """run() calls os.setpgrp() to become the process-group leader.

    The patched setpgrp raises so the test can also verify that run()
    calls it before doing anything else.
    """
    exception_type = factory.make_exception_type()
    # Renamed from `os`: the original local shadowed the stdlib module
    # name, which is confusing and error-prone within this method.
    mock_os = self.patch(avahi_module, "os")
    mock_os.setpgrp.side_effect = exception_type
    self.assertRaises(exception_type, run, [])
    self.assertThat(mock_os.setpgrp, MockCalledOnceWith())
def test_changing_kms_host_triggers_update(self):
    """Setting the windows_kms_host config notifies the DNS machinery."""
    kms_setting_changed = self.patch_autospec(
        domain_module, "dns_kms_setting_changed")
    Config.objects.set_config('windows_kms_host', '8.8.8.8')
    self.assertThat(kms_setting_changed, MockCalledOnceWith())
def test_file_saved(self):
    """give_file_to_user() persists the file after reassigning it."""
    user = factory.make_User()
    # Renamed from `file`: the original local shadowed the builtin.
    stored_file = factory.make_FileStorage(owner=None)
    save = self.patch(stored_file, "save")
    give_file_to_user(stored_file, user)
    self.assertThat(save, MockCalledOnceWith())
def test_warn_loopback_warns_about_any_IPv4_loopback(self):
    """Any address in 127/8 triggers a warning, not just 127.0.0.1."""
    mock_logger = self.patch(zonegenerator, "logger")
    warn_loopback("127.254.100.99")
    self.assertThat(mock_logger.warning, MockCalledOnceWith(ANY))
def test_warn_loopback_warns_about_IPv6_loopback(self):
    """The IPv6 loopback address also triggers a warning."""
    mock_logger = self.patch(zonegenerator, "logger")
    warn_loopback("::1")
    self.assertThat(mock_logger.warning, MockCalledOnceWith(ANY))
def test__calls_reloadService(self):
    """proxy_update_config() asks the service monitor to reload proxy."""
    self.patch(settings, "PROXY_CONNECT", True)
    yield deferToDatabase(self.make_subnet)
    yield proxyconfig.proxy_update_config()
    self.assertThat(
        self.service_monitor.reloadService,
        MockCalledOnceWith("proxy", if_on=True))
def test_warn_loopback_warns_about_IPv4_loopback(self):
    """127.0.0.1 produces the canonical loopback warning message."""
    mock_logger = self.patch(zonegenerator, "logger")
    loopback = "127.0.0.1"
    warn_loopback(loopback)
    self.assertThat(
        mock_logger.warning,
        MockCalledOnceWith(WARNING_MESSAGE % loopback))
def test_compose_multiple_interface_constraints(self):
    """compose() translates every requested interface into an LXD NIC
    device, giving boot priority to the first one.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    pod_id = factory.make_name("pod_id")
    context = self.make_parameters_context()
    request = make_requested_machine()
    request.interfaces = [
        RequestedMachineInterface(
            ifname=factory.make_name("ifname"),
            attach_name=factory.make_name("bridge_name"),
            attach_type="bridge",
            attach_options=None,
        )
        for _ in range(3)
    ]
    # LXD uses 'bridged' while MAAS uses 'bridge' so convert
    # the nictype as this is what we expect from LXDPodDriver.compose.
    expected_interfaces = [
        {
            "name": request.interfaces[i].ifname,
            "parent": request.interfaces[i].attach_name,
            "nictype": "bridged",
            "type": "nic",
        }
        for i in range(3)
    ]
    expected_interfaces[0]["boot.priority"] = "1"
    driver = lxd_module.LXDPodDriver()
    Client = self.patch(driver, "get_client")
    client = Client.return_value
    mock_profile = Mock()
    mock_profile.name = random.choice(["maas", "default"])
    profile_devices = {
        "eth0": {
            "name": "eth0",
            "nictype": "bridged",
            "parent": "lxdbr0",
            "type": "nic",
        },
        "eth1": {
            "boot.priority": "1",
            "name": "eth1",
            "nictype": "bridged",
            "parent": "virbr1",
            "type": "nic",
        },
        "root": {
            "boot.priority": "0",
            "path": "/",
            "pool": "default",
            "type": "disk",
            "size": "20GB",
        },
    }
    mock_profile.devices = profile_devices
    client.profiles.get.return_value = mock_profile
    mock_storage_pools = Mock()
    client.storage_pools.all.return_value = mock_storage_pools
    mock_get_usable_storage_pool = self.patch(
        driver, "get_usable_storage_pool"
    )
    usable_pool = factory.make_name("pool")
    mock_get_usable_storage_pool.return_value = usable_pool
    mock_get_best_nic_from_profile = self.patch(
        driver, "get_best_nic_from_profile"
    )
    mock_get_best_nic_from_profile.return_value = (
        "eth1",
        profile_devices["eth1"],
    )
    mock_machine = Mock()
    client.virtual_machines.create.return_value = mock_machine
    mock_get_discovered_machine = self.patch(
        driver, "get_discovered_machine"
    )
    mock_get_discovered_machine.side_effect = async_succeed(
        sentinel.discovered_machine
    )
    definition = {
        "name": request.hostname,
        "architecture": debian_to_kernel_architecture(
            request.architecture
        ),
        "config": {
            "limits.cpu": str(request.cores),
            "limits.memory": str(request.memory * 1024 ** 2),
            "limits.memory.hugepages": "false",
            "security.secureboot": "false",
        },
        "profiles": [mock_profile.name],
        "source": {"type": "none"},
        "devices": {
            "root": {
                "path": "/",
                "type": "disk",
                "pool": usable_pool,
                "size": str(request.block_devices[0].size),
                "boot.priority": "0",
            },
            expected_interfaces[0]["name"]: expected_interfaces[0],
            expected_interfaces[1]["name"]: expected_interfaces[1],
            expected_interfaces[2]["name"]: expected_interfaces[2],
            # Profile NICs are masked out with 'none' devices.
            "eth1": {"type": "none"},
            "eth0": {"type": "none"},
        },
    }
    discovered_machine, empty_hints = yield driver.compose(
        pod_id, context, request
    )
    self.assertThat(
        client.virtual_machines.create,
        MockCalledOnceWith(definition, wait=True),
    )
    self.assertEqual(sentinel.discovered_machine, discovered_machine)
    self.assertThat(
        empty_hints,
        MatchesAll(
            IsInstance(DiscoveredPodHints),
            MatchesStructure(
                cores=Equals(-1),
                cpu_speed=Equals(-1),
                memory=Equals(-1),
                local_storage=Equals(-1),
                local_disks=Equals(-1),
                iscsi_storage=Equals(-1),
            ),
        ),
    )
def test_save_validates_unique_except_for_pk_when_new(self):
    """Saving a new object validates uniqueness, excluding the pk."""
    obj = CleanSaveTestModel()
    validate_unique = self.patch(obj, "validate_unique")
    obj.save()
    self.assertThat(validate_unique, MockCalledOnceWith(exclude=["id"]))
def test_calls_which(self):
    """has_command_available() probes for the command via `which`."""
    call_and_check = self.patch(shell_module, "call_and_check")
    cmd = factory.make_name("cmd")
    has_command_available(cmd)
    self.assertThat(call_and_check, MockCalledOnceWith(["which", cmd]))
def test_save_performed_with_force_insert(self):
    """force_insert is passed straight through to Model.save()."""
    obj = CleanSaveTestModel.objects.create()
    base_save = self.patch(Model, "save")
    obj.save(force_insert=True)
    self.assertThat(base_save, MockCalledOnceWith(force_insert=True))
def test__defaults(self):
    """With no args, parallel.main() uses cpu_count - 2 workers (min 2)."""
    sysexit = self.assertRaises(SystemExit, parallel.main, [])
    self.assertThat(sysexit.code, Equals(0))
    expected_workers = max(os.cpu_count() - 2, 2)
    self.assertThat(
        parallel.test, MockCalledOnceWith(ANY, ANY, expected_workers))
def test__checks_connectivity_of_rack_controllers(self):
    """Processing a request queries all connected rack clients."""
    mock_getAllClients = self.patch(middleware_module, 'getAllClients')
    self.quick_process()
    self.assertThat(mock_getAllClients, MockCalledOnceWith())
def test__subprocess_count_can_be_specified(self):
    """--subprocesses N makes parallel.main() run N workers."""
    count = random.randrange(100, 1000)
    sysexit = self.assertRaises(
        SystemExit, parallel.main, ["--subprocesses", str(count)])
    self.assertThat(sysexit.code, Equals(0))
    self.assertThat(parallel.test, MockCalledOnceWith(ANY, ANY, count))
def test_bmc_list_sections(self):
    """Ensure bmc-config is called with the correct args."""
    run_command = self.patch(maas_ipmi_autodetect, "run_command")
    bmc_list_sections()
    self.assertThat(run_command, MockCalledOnceWith(("bmc-config", "-L")))
def test__later_is_the_default(self):
    """Tag.save() defers node population via _populate_nodes_later."""
    tag = Tag(name=factory.make_name("tag"))
    self.patch(tag, "_populate_nodes_later")
    # Not called until the tag is actually saved.
    self.assertThat(tag._populate_nodes_later, MockNotCalled())
    tag.save()
    self.assertThat(tag._populate_nodes_later, MockCalledOnceWith())
def test__executes_rndc_command(self):
    """bind_reload_zones() reloads the given zone via rndc and succeeds."""
    self.patch_autospec(actions, "execute_rndc_command")
    self.assertTrue(actions.bind_reload_zones(sentinel.zone))
    self.assertThat(
        actions.execute_rndc_command,
        MockCalledOnceWith(("reload", sentinel.zone)))
def test_applies_tags_to_nodes_on_save(self):
    """A tag with a definition populates nodes when saved."""
    populate = self.patch_autospec(Tag, "populate_nodes")
    tag = Tag(name=factory.make_name("tag"), definition="//node/child")
    # Constructing the tag alone must not trigger population.
    self.assertThat(populate, MockNotCalled())
    tag.save()
    self.assertThat(populate, MockCalledOnceWith(tag))
def test__creates_cache_with_None_progress(self):
    """get_version_from_apt() opens the apt cache without a progress bar."""
    cache_class = self.patch(version.apt_pkg, "Cache")
    version.get_version_from_apt(version.REGION_PACKAGE_NAME)
    self.assertThat(cache_class, MockCalledOnceWith(None))
def test_save_always_calls_save_when_new(self):
    """A brand-new object always reaches Model.save()."""
    base_save = self.patch(Model, "save")
    obj = CleanSaveTestModel()
    obj.save()
    self.assertThat(base_save, MockCalledOnceWith())
def test_makeService_patches_tftp_service(self):
    """makeService() applies the txtftp monkey-patches."""
    tftp_patch = self.patch(plugin_module, 'add_patches_to_txtftp')
    options = Options()
    service_maker = ProvisioningServiceMaker("Harry", "Hill")
    service_maker.makeService(options, clock=None)
    self.assertThat(tftp_patch, MockCalledOnceWith())
def test_save_performed_when_id_reset(self):
    """Clearing the pk makes the next save() hit Model.save()."""
    obj = CleanSaveTestModel.objects.create()
    obj.id = None
    base_save = self.patch(Model, "save")
    obj.save()
    self.assertThat(base_save, MockCalledOnceWith())
def test_subprocess_per_core_can_be_specified(self):
    """--subprocess-per-core runs one worker per CPU core."""
    sysexit = self.assertRaises(
        SystemExit, parallel.main, ["--subprocess-per-core"])
    self.assertThat(sysexit.code, Equals(0))
    self.assertThat(
        parallel.test, MockCalledOnceWith(ANY, ANY, os.cpu_count()))
def test_save_performed_when_state_forced(self):
    """Forcing _state.adding makes save() call through to Model.save()."""
    obj = CleanSaveTestModel.objects.create()
    obj._state.adding = True
    base_save = self.patch(Model, "save")
    obj.save()
    self.assertThat(base_save, MockCalledOnceWith())
def test_power_off(self):
    """power_off() issues the fence_cdu 'off' command with the context."""
    driver = fence_cdu_module.FenceCDUPowerDriver()
    context = self.make_context()
    issue_command = self.patch(driver, '_issue_fence_cdu_command')
    driver.power_off("fake_id", context)
    self.assertThat(issue_command, MockCalledOnceWith('off', **context))
def test_run_calls_refreshDiscoveryConfig(self):
    """Starting the service immediately refreshes the discovery config."""
    clock = Clock()
    service = ActiveDiscoveryService(clock)
    refresh = self.patch(service, "refreshDiscoveryConfig")
    service.startService()
    self.assertThat(refresh, MockCalledOnceWith())