Example #1
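# Tests for the rack controller's power-query helpers: querying a node's
# power state, reporting it to the region, and emitting the related events.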
class TestPowerQuery(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def setUp(self):
        super().setUp()
        self.useFixture(EventTypesAllRegistered())
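        # Replace deferToThread with maybeDeferred so would-be threaded calls
        # run synchronously in these tests.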
        self.patch(power, "deferToThread", maybeDeferred)
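        # Pretend every registered power driver has its packages installed.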
        for _, power_driver in PowerDriverRegistry:
            self.patch(
                power_driver, "detect_missing_packages"
            ).return_value = []

    def patch_rpc_methods(self, return_value=None, side_effect=None):
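        # Set up a fake region RPC endpoint and return the command mocks the
        # tests assert against, plus the IO pump used to flush deferreds.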
        fixture = self.useFixture(MockClusterToRegionRPCFixture())
        protocol, io = fixture.makeEventLoop(
            region.MarkNodeFailed,
            region.SendEvent,
            region.UpdateNodePowerState,
        )
        if return_value is None:
            return_value = {}
        protocol.MarkNodeFailed.return_value = return_value
        protocol.MarkNodeFailed.side_effect = side_effect
        return protocol.SendEvent, protocol.MarkNodeFailed, io

    def test_power_query_failure_emits_event(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        message = factory.make_name("message")
        SendEvent, _, io = self.patch_rpc_methods()
        d = power.power_query_failure(
            system_id, hostname, Failure(Exception(message))
        )
        # This blocks until the deferred is complete.
        io.flush()
        self.assertIsNone(extract_result(d))
        self.assertThat(
            SendEvent,
            MockCalledOnceWith(
                ANY,
                type_name=EVENT_TYPES.NODE_POWER_QUERY_FAILED,
                system_id=system_id,
                description=message,
            ),
        )

    def test_power_query_success_emits_event(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        state = factory.make_name("state")
        message = "Power state queried: %s" % state
        SendEvent, _, io = self.patch_rpc_methods()
        d = power.power_query_success(system_id, hostname, state)
        # This blocks until the deferred is complete.
        io.flush()
        self.assertIsNone(extract_result(d))
        self.assertThat(
            SendEvent,
            MockCalledOnceWith(
                ANY,
                type_name=EVENT_TYPES.NODE_POWER_QUERIED_DEBUG,
                system_id=system_id,
                description=message,
            ),
        )

    def test_get_power_state_queries_node(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_driver = random.choice(
            [driver for _, driver in PowerDriverRegistry if driver.queryable]
        )
        power_state = random.choice(["on", "off"])
        context = {
            factory.make_name("context-key"): factory.make_name("context-val")
        }
        self.patch(power, "is_driver_available").return_value = True
        _, markNodeBroken, io = self.patch_rpc_methods()
        mock_perform_power_driver_query = self.patch(
            power, "perform_power_driver_query"
        )
        mock_perform_power_driver_query.return_value = power_state

        d = power.get_power_state(
            system_id, hostname, power_driver.name, context
        )
        # This blocks until the deferred is complete.
        io.flush()
        self.assertEqual(power_state, extract_result(d))
        self.assertThat(
            power_driver.detect_missing_packages, MockCalledOnceWith()
        )
        self.assertThat(
            mock_perform_power_driver_query,
            MockCallsMatch(
                call(system_id, hostname, power_driver.name, context)
            ),
        )

    def test_get_power_state_fails_for_missing_packages(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_driver = random.choice(
            [driver for _, driver in PowerDriverRegistry if driver.queryable]
        )
        context = {
            factory.make_name("context-key"): factory.make_name("context-val")
        }
        self.patch(power, "is_driver_available").return_value = False
        _, markNodeBroken, io = self.patch_rpc_methods()

        power_driver.detect_missing_packages.return_value = ["gone"]

        d = power.get_power_state(
            system_id, hostname, power_driver.name, context
        )
        # This blocks until the deferred is complete.
        io.flush()

        self.assertThat(
            power_driver.detect_missing_packages, MockCalledOnceWith()
        )
        return assert_fails_with(d, exceptions.PowerActionFail)

    def test_report_power_state_changes_power_state_if_failure(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        err_msg = factory.make_name("error")

        _, _, io = self.patch_rpc_methods()
        self.patch_autospec(power, "power_state_update")

        # Simulate a failure when querying state.
        query = fail(exceptions.PowerActionFail(err_msg))
        report = power.report_power_state(query, system_id, hostname)
        # This blocks until the deferred is complete.
        io.flush()

        error = self.assertRaises(
            exceptions.PowerActionFail, extract_result, report
        )
        self.assertEqual(err_msg, str(error))
        self.assertThat(
            power.power_state_update, MockCalledOnceWith(system_id, "error")
        )

    def test_report_power_state_changes_power_state_if_success(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_state = random.choice(["on", "off"])

        _, _, io = self.patch_rpc_methods()
        self.patch_autospec(power, "power_state_update")

        # Simulate a success when querying state.
        query = succeed(power_state)
        report = power.report_power_state(query, system_id, hostname)
        # This blocks until the deferred is complete.
        io.flush()

        self.assertEqual(power_state, extract_result(report))
        self.assertThat(
            power.power_state_update,
            MockCalledOnceWith(system_id, power_state),
        )

    def test_report_power_state_changes_power_state_if_unknown(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_state = "unknown"

        _, _, io = self.patch_rpc_methods()
        self.patch_autospec(power, "power_state_update")

        # Simulate a success when querying state.
        query = succeed(power_state)
        report = power.report_power_state(query, system_id, hostname)
        # This blocks until the deferred is complete.
        io.flush()

        self.assertEqual(power_state, extract_result(report))
        self.assertThat(
            power.power_state_update,
            MockCalledOnceWith(system_id, power_state),
        )
Example #2
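# Tests for dhcp.configure(), exercised against both the DHCPv4 and DHCPv6
# server classes via test scenarios.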
class TestConfigureDHCP(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    scenarios = (
        ("DHCPv4", {"server": dhcp.DHCPv4Server}),
        ("DHCPv6", {"server": dhcp.DHCPv6Server}),
    )

    def setUp(self):
        super().setUp()
        # The service monitor is an application global and so are the services
        # it monitors, and tests must leave them as they found them.
        self.addCleanup(dhcp.service_monitor.getServiceByName("dhcpd").off)
        self.addCleanup(dhcp.service_monitor.getServiceByName("dhcpd6").off)
        # The dhcp server states are global so we clean them after each test.
        self.addCleanup(dhcp._current_server_state.clear)
        # Temporarily prevent hostname resolution when generating DHCP
        # configuration. This is tested elsewhere.
        self.useFixture(DHCPConfigNameResolutionDisabled())

    def configure(
        self,
        omapi_key,
        failover_peers,
        shared_networks,
        hosts,
        interfaces,
        dhcp_snippets,
    ):
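        # Build the scenario's server (DHCPv4 or DHCPv6) and hand everything
        # to dhcp.configure().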
        server = self.server(omapi_key)
        return dhcp.configure(
            server,
            failover_peers,
            shared_networks,
            hosts,
            interfaces,
            dhcp_snippets,
        )
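
    # The patch_* helpers below stub out filesystem access, config writing,
    # and the service monitor so no real dhcpd is touched.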

    def patch_os_exists(self):
        return self.patch_autospec(dhcp.os.path, "exists")

    def patch_sudo_delete_file(self):
        return self.patch_autospec(dhcp, "sudo_delete_file")

    def patch_sudo_write_file(self):
        return self.patch_autospec(dhcp, "sudo_write_file")

    def patch_restartService(self):
        return self.patch(dhcp.service_monitor, "restartService")

    def patch_ensureService(self):
        return self.patch(dhcp.service_monitor, "ensureService")

    def patch_getServiceState(self):
        return self.patch(dhcp.service_monitor, "getServiceState")

    def patch_get_config(self):
        return self.patch_autospec(dhcp, "get_config")

    def patch_update_hosts(self):
        return self.patch(dhcp, "_update_hosts")

    @inlineCallbacks
    def test_deletes_dhcp_config_if_no_subnets_defined(self):
        mock_exists = self.patch_os_exists()
        mock_exists.return_value = True
        mock_sudo_delete = self.patch_sudo_delete_file()
        dhcp_service = dhcp.service_monitor.getServiceByName(
            self.server.dhcp_service
        )
        self.patch_autospec(dhcp_service, "off")
        self.patch_restartService()
        self.patch_ensureService()
        yield self.configure(factory.make_name("key"), [], [], [], [], [])
        self.assertThat(
            mock_sudo_delete, MockCalledOnceWith(self.server.config_filename)
        )

    @inlineCallbacks
    def test_stops_dhcp_server_if_no_subnets_defined(self):
        mock_exists = self.patch_os_exists()
        mock_exists.return_value = False
        dhcp_service = dhcp.service_monitor.getServiceByName(
            self.server.dhcp_service
        )
        off = self.patch_autospec(dhcp_service, "off")
        restart_service = self.patch_restartService()
        ensure_service = self.patch_ensureService()
        yield self.configure(factory.make_name("key"), [], [], [], [], [])
        self.assertThat(off, MockCalledOnceWith())
        self.assertThat(
            ensure_service, MockCalledOnceWith(self.server.dhcp_service)
        )
        self.assertThat(restart_service, MockNotCalled())

    @inlineCallbacks
    def test_stops_dhcp_server_clears_state(self):
        dhcp._current_server_state[self.server.dhcp_service] = sentinel.state
        mock_exists = self.patch_os_exists()
        mock_exists.return_value = False
        dhcp_service = dhcp.service_monitor.getServiceByName(
            self.server.dhcp_service
        )
        self.patch_autospec(dhcp_service, "off")
        self.patch_restartService()
        self.patch_ensureService()
        yield self.configure(factory.make_name("key"), [], [], [], [], [])
        self.assertIsNone(dhcp._current_server_state[self.server.dhcp_service])

    @inlineCallbacks
    def test_writes_config_and_calls_restart_when_no_current_state(self):
        write_file = self.patch_sudo_write_file()
        restart_service = self.patch_restartService()

        failover_peers = make_failover_peer_config()
        shared_network = make_shared_network()
        [shared_network] = fix_shared_networks_failover(
            [shared_network], [failover_peers]
        )
        host = make_host()
        interface = make_interface()
        global_dhcp_snippets = make_global_dhcp_snippets()
        expected_config = factory.make_name("config")
        self.patch_get_config().return_value = expected_config

        dhcp_service = dhcp.service_monitor.getServiceByName(
            self.server.dhcp_service
        )
        on = self.patch_autospec(dhcp_service, "on")

        omapi_key = factory.make_name("omapi_key")
        yield self.configure(
            omapi_key,
            [failover_peers],
            [shared_network],
            [host],
            [interface],
            global_dhcp_snippets,
        )

        self.assertThat(
            write_file,
            MockCallsMatch(
                call(
                    self.server.config_filename,
                    expected_config.encode("utf-8"),
                    mode=0o640,
                ),
                call(
                    self.server.interfaces_filename,
                    interface["name"].encode("utf-8"),
                    mode=0o640,
                ),
            ),
        )
        self.assertThat(on, MockCalledOnceWith())
        self.assertThat(
            restart_service, MockCalledOnceWith(self.server.dhcp_service)
        )
        self.assertEqual(
            dhcp._current_server_state[self.server.dhcp_service],
            dhcp.DHCPState(
                omapi_key,
                [failover_peers],
                [shared_network],
                [host],
                [interface],
                global_dhcp_snippets,
            ),
        )

    @inlineCallbacks
    def test_writes_config_and_calls_restart_when_non_host_state_diff(self):
        write_file = self.patch_sudo_write_file()
        restart_service = self.patch_restartService()

        failover_peers = make_failover_peer_config()
        shared_network = make_shared_network()
        [shared_network] = fix_shared_networks_failover(
            [shared_network], [failover_peers]
        )
        host = make_host()
        interface = make_interface()
        global_dhcp_snippets = make_global_dhcp_snippets()
        expected_config = factory.make_name("config")
        self.patch_get_config().return_value = expected_config

        dhcp_service = dhcp.service_monitor.getServiceByName(
            self.server.dhcp_service
        )
        on = self.patch_autospec(dhcp_service, "on")

        old_state = dhcp.DHCPState(
            factory.make_name("omapi_key"),
            [failover_peers],
            [shared_network],
            [host],
            [interface],
            global_dhcp_snippets,
        )
        dhcp._current_server_state[self.server.dhcp_service] = old_state

        omapi_key = factory.make_name("omapi_key")
        yield self.configure(
            omapi_key,
            [failover_peers],
            [shared_network],
            [host],
            [interface],
            global_dhcp_snippets,
        )

        self.assertThat(
            write_file,
            MockCallsMatch(
                call(
                    self.server.config_filename,
                    expected_config.encode("utf-8"),
                    mode=0o640,
                ),
                call(
                    self.server.interfaces_filename,
                    interface["name"].encode("utf-8"),
                    mode=0o640,
                ),
            ),
        )
        self.assertThat(on, MockCalledOnceWith())
        self.assertThat(
            restart_service, MockCalledOnceWith(self.server.dhcp_service)
        )
        self.assertEqual(
            dhcp._current_server_state[self.server.dhcp_service],
            dhcp.DHCPState(
                omapi_key,
                [failover_peers],
                [shared_network],
                [host],
                [interface],
                global_dhcp_snippets,
            ),
        )

    @inlineCallbacks
    def test_writes_config_and_calls_ensure_when_nothing_changed(self):
        write_file = self.patch_sudo_write_file()
        restart_service = self.patch_restartService()
        ensure_service = self.patch_ensureService()

        failover_peers = make_failover_peer_config()
        shared_network = make_shared_network()
        [shared_network] = fix_shared_networks_failover(
            [shared_network], [failover_peers]
        )
        host = make_host()
        interface = make_interface()
        dhcp_snippets = make_global_dhcp_snippets()
        expected_config = factory.make_name("config")
        self.patch_get_config().return_value = expected_config

        dhcp_service = dhcp.service_monitor.getServiceByName(
            self.server.dhcp_service
        )
        on = self.patch_autospec(dhcp_service, "on")

        omapi_key = factory.make_name("omapi_key")
        old_state = dhcp.DHCPState(
            omapi_key,
            [failover_peers],
            [shared_network],
            [host],
            [interface],
            dhcp_snippets,
        )
        dhcp._current_server_state[self.server.dhcp_service] = old_state

        yield self.configure(
            omapi_key,
            [failover_peers],
            [shared_network],
            [host],
            [interface],
            dhcp_snippets,
        )

        self.assertThat(
            write_file,
            MockCallsMatch(
                call(
                    self.server.config_filename,
                    expected_config.encode("utf-8"),
                    mode=0o640,
                ),
                call(
                    self.server.interfaces_filename,
                    interface["name"].encode("utf-8"),
                    mode=0o640,
                ),
            ),
        )
        self.assertThat(on, MockCalledOnceWith())
        self.assertThat(restart_service, MockNotCalled())
        self.assertThat(
            ensure_service, MockCalledOnceWith(self.server.dhcp_service)
        )
        self.assertEqual(
            dhcp._current_server_state[self.server.dhcp_service],
            dhcp.DHCPState(
                omapi_key,
                [failover_peers],
                [shared_network],
                [host],
                [interface],
                dhcp_snippets,
            ),
        )

    @inlineCallbacks
    def test_writes_config_and_doesnt_use_omapi_when_was_off(self):
        write_file = self.patch_sudo_write_file()
        get_service_state = self.patch_getServiceState()
        get_service_state.return_value = ServiceState(
            SERVICE_STATE.OFF, "dead"
        )
        restart_service = self.patch_restartService()
        ensure_service = self.patch_ensureService()
        update_hosts = self.patch_update_hosts()

        failover_peers = make_failover_peer_config()
        shared_network = make_shared_network()
        [shared_network] = fix_shared_networks_failover(
            [shared_network], [failover_peers]
        )
        host = make_host(dhcp_snippets=[])
        interface = make_interface()
        global_dhcp_snippets = make_global_dhcp_snippets()
        expected_config = factory.make_name("config")
        self.patch_get_config().return_value = expected_config

        dhcp_service = dhcp.service_monitor.getServiceByName(
            self.server.dhcp_service
        )
        on = self.patch_autospec(dhcp_service, "on")

        omapi_key = factory.make_name("omapi_key")
        old_host = make_host(dhcp_snippets=[])
        old_state = dhcp.DHCPState(
            omapi_key,
            [failover_peers],
            [shared_network],
            [old_host],
            [interface],
            global_dhcp_snippets,
        )
        dhcp._current_server_state[self.server.dhcp_service] = old_state

        yield self.configure(
            omapi_key,
            [failover_peers],
            [shared_network],
            [host],
            [interface],
            global_dhcp_snippets,
        )

        self.assertThat(
            write_file,
            MockCallsMatch(
                call(
                    self.server.config_filename,
                    expected_config.encode("utf-8"),
                    mode=0o640,
                ),
                call(
                    self.server.interfaces_filename,
                    interface["name"].encode("utf-8"),
                    mode=0o640,
                ),
            ),
        )
        self.assertThat(on, MockCalledOnceWith())
        self.assertThat(
            get_service_state,
            MockCalledOnceWith(self.server.dhcp_service, now=True),
        )
        self.assertThat(restart_service, MockNotCalled())
        self.assertThat(
            ensure_service, MockCalledOnceWith(self.server.dhcp_service)
        )
        self.assertThat(update_hosts, MockNotCalled())
        self.assertEqual(
            dhcp._current_server_state[self.server.dhcp_service],
            dhcp.DHCPState(
                omapi_key,
                [failover_peers],
                [shared_network],
                [host],
                [interface],
                global_dhcp_snippets,
            ),
        )

    @inlineCallbacks
    def test_writes_config_and_uses_omapi_to_update_hosts(self):
        write_file = self.patch_sudo_write_file()
        get_service_state = self.patch_getServiceState()
        get_service_state.return_value = ServiceState(
            SERVICE_STATE.ON, "running"
        )
        restart_service = self.patch_restartService()
        ensure_service = self.patch_ensureService()
        update_hosts = self.patch_update_hosts()

        failover_peers = make_failover_peer_config()
        shared_network = make_shared_network()
        [shared_network] = fix_shared_networks_failover(
            [shared_network], [failover_peers]
        )
        old_hosts = [make_host(dhcp_snippets=[]) for _ in range(3)]
        interface = make_interface()
        global_dhcp_snippets = make_global_dhcp_snippets()
        expected_config = factory.make_name("config")
        self.patch_get_config().return_value = expected_config

        dhcp_service = dhcp.service_monitor.getServiceByName(
            self.server.dhcp_service
        )
        on = self.patch_autospec(dhcp_service, "on")

        omapi_key = factory.make_name("omapi_key")
        old_state = dhcp.DHCPState(
            omapi_key,
            [failover_peers],
            [shared_network],
            old_hosts,
            [interface],
            global_dhcp_snippets,
        )
        dhcp._current_server_state[self.server.dhcp_service] = old_state

        new_hosts = copy.deepcopy(old_hosts)
        removed_host = new_hosts.pop()
        modified_host = new_hosts[0]
        modified_host["ip"] = factory.make_ip_address()
        added_host = make_host(dhcp_snippets=[])
        new_hosts.append(added_host)

        yield self.configure(
            omapi_key,
            [failover_peers],
            [shared_network],
            new_hosts,
            [interface],
            global_dhcp_snippets,
        )

        self.assertThat(
            write_file,
            MockCallsMatch(
                call(
                    self.server.config_filename,
                    expected_config.encode("utf-8"),
                    mode=0o640,
                ),
                call(
                    self.server.interfaces_filename,
                    interface["name"].encode("utf-8"),
                    mode=0o640,
                ),
            ),
        )
        self.assertThat(on, MockCalledOnceWith())
        self.assertThat(
            get_service_state,
            MockCalledOnceWith(self.server.dhcp_service, now=True),
        )
        self.assertThat(restart_service, MockNotCalled())
        self.assertThat(
            ensure_service, MockCalledOnceWith(self.server.dhcp_service)
        )
        self.assertThat(
            update_hosts,
            MockCalledOnceWith(
                ANY, [removed_host], [added_host], [modified_host]
            ),
        )
        self.assertEqual(
            dhcp._current_server_state[self.server.dhcp_service],
            dhcp.DHCPState(
                omapi_key,
                [failover_peers],
                [shared_network],
                new_hosts,
                [interface],
                global_dhcp_snippets,
            ),
        )

    @inlineCallbacks
    def test_writes_config_and_restarts_when_omapi_fails(self):
        write_file = self.patch_sudo_write_file()
        get_service_state = self.patch_getServiceState()
        get_service_state.return_value = ServiceState(
            SERVICE_STATE.ON, "running"
        )
        restart_service = self.patch_restartService()
        ensure_service = self.patch_ensureService()
        update_hosts = self.patch_update_hosts()
        update_hosts.side_effect = factory.make_exception()

        failover_peers = make_failover_peer_config()
        shared_network = make_shared_network()
        [shared_network] = fix_shared_networks_failover(
            [shared_network], [failover_peers]
        )
        old_hosts = [make_host(dhcp_snippets=[]) for _ in range(3)]
        interface = make_interface()
        global_dhcp_snippets = make_global_dhcp_snippets()
        expected_config = factory.make_name("config")
        self.patch_get_config().return_value = expected_config

        dhcp_service = dhcp.service_monitor.getServiceByName(
            self.server.dhcp_service
        )
        on = self.patch_autospec(dhcp_service, "on")

        omapi_key = factory.make_name("omapi_key")
        old_state = dhcp.DHCPState(
            omapi_key,
            [failover_peers],
            [shared_network],
            old_hosts,
            [interface],
            global_dhcp_snippets,
        )
        dhcp._current_server_state[self.server.dhcp_service] = old_state

        new_hosts = copy.deepcopy(old_hosts)
        removed_host = new_hosts.pop()
        modified_host = new_hosts[0]
        modified_host["ip"] = factory.make_ip_address()
        added_host = make_host(dhcp_snippets=[])
        new_hosts.append(added_host)

        with FakeLogger("maas") as logger:
            yield self.configure(
                omapi_key,
                [failover_peers],
                [shared_network],
                new_hosts,
                [interface],
                global_dhcp_snippets,
            )

        self.assertThat(
            write_file,
            MockCallsMatch(
                call(
                    self.server.config_filename,
                    expected_config.encode("utf-8"),
                    mode=0o640,
                ),
                call(
                    self.server.interfaces_filename,
                    interface["name"].encode("utf-8"),
                    mode=0o640,
                ),
            ),
        )
        self.assertThat(on, MockCalledOnceWith())
        self.assertThat(
            get_service_state,
            MockCalledOnceWith(self.server.dhcp_service, now=True),
        )
        self.assertThat(
            restart_service, MockCalledOnceWith(self.server.dhcp_service)
        )
        self.assertThat(
            ensure_service, MockCalledOnceWith(self.server.dhcp_service)
        )
        self.assertThat(
            update_hosts,
            MockCalledOnceWith(
                ANY, [removed_host], [added_host], [modified_host]
            ),
        )
        self.assertEqual(
            dhcp._current_server_state[self.server.dhcp_service],
            dhcp.DHCPState(
                omapi_key,
                [failover_peers],
                [shared_network],
                new_hosts,
                [interface],
                global_dhcp_snippets,
            ),
        )
        self.assertDocTestMatches(
            "Failed to update all host maps. Restarting DHCPv... "
            "service to ensure host maps are in-sync.",
            logger.output,
        )

    @inlineCallbacks
    def test_converts_failure_writing_file_to_CannotConfigureDHCP(self):
        self.patch_sudo_delete_file()
        self.patch_sudo_write_file().side_effect = ExternalProcessError(
            1, "sudo something"
        )
        self.patch_restartService()
        failover_peers = [make_failover_peer_config()]
        shared_networks = fix_shared_networks_failover(
            [make_shared_network()], failover_peers
        )
        with ExpectedException(exceptions.CannotConfigureDHCP):
            yield self.configure(
                factory.make_name("key"),
                failover_peers,
                shared_networks,
                [make_host()],
                [make_interface()],
                make_global_dhcp_snippets(),
            )

    @inlineCallbacks
    def test_converts_dhcp_restart_failure_to_CannotConfigureDHCP(self):
        self.patch_sudo_write_file()
        self.patch_sudo_delete_file()
        self.patch_restartService().side_effect = ServiceActionError()
        failover_peers = [make_failover_peer_config()]
        shared_networks = fix_shared_networks_failover(
            [make_shared_network()], failover_peers
        )
        with ExpectedException(exceptions.CannotConfigureDHCP):
            yield self.configure(
                factory.make_name("key"),
                failover_peers,
                shared_networks,
                [make_host()],
                [make_interface()],
                make_global_dhcp_snippets(),
            )

    @inlineCallbacks
    def test_converts_stop_dhcp_server_failure_to_CannotConfigureDHCP(self):
        self.patch_sudo_write_file()
        self.patch_sudo_delete_file()
        self.patch_ensureService().side_effect = ServiceActionError()
        with ExpectedException(exceptions.CannotConfigureDHCP):
            yield self.configure(factory.make_name("key"), [], [], [], [], [])

    @inlineCallbacks
    def test_does_not_log_ServiceActionError(self):
        self.patch_sudo_write_file()
        self.patch_sudo_delete_file()
        self.patch_ensureService().side_effect = ServiceActionError()
        with FakeLogger("maas") as logger:
            with ExpectedException(exceptions.CannotConfigureDHCP):
                yield self.configure(
                    factory.make_name("key"), [], [], [], [], []
                )
        self.assertDocTestMatches("", logger.output)

    @inlineCallbacks
    def test_does_log_other_exceptions(self):
        self.patch_sudo_write_file()
        self.patch_sudo_delete_file()
        self.patch_ensureService().side_effect = factory.make_exception(
            "DHCP is on strike today"
        )
        with FakeLogger("maas") as logger:
            with ExpectedException(exceptions.CannotConfigureDHCP):
                yield self.configure(
                    factory.make_name("key"), [], [], [], [], []
                )
        self.assertDocTestMatches(
            "DHCPv... server failed to stop: DHCP is on strike today",
            logger.output,
        )

    @inlineCallbacks
    def test_does_not_log_ServiceActionError_when_restarting(self):
        self.patch_sudo_write_file()
        self.patch_restartService().side_effect = ServiceActionError()
        failover_peers = [make_failover_peer_config()]
        shared_networks = fix_shared_networks_failover(
            [make_shared_network()], failover_peers
        )
        with FakeLogger("maas") as logger:
            with ExpectedException(exceptions.CannotConfigureDHCP):
                yield self.configure(
                    factory.make_name("key"),
                    failover_peers,
                    shared_networks,
                    [make_host()],
                    [make_interface()],
                    make_global_dhcp_snippets(),
                )
        self.assertDocTestMatches("", logger.output)

    @inlineCallbacks
    def test_does_log_other_exceptions_when_restarting(self):
        self.patch_sudo_write_file()
        self.patch_restartService().side_effect = factory.make_exception(
            "DHCP is on strike today"
        )
        failover_peers = [make_failover_peer_config()]
        shared_networks = fix_shared_networks_failover(
            [make_shared_network()], failover_peers
        )
        with FakeLogger("maas") as logger:
            with ExpectedException(exceptions.CannotConfigureDHCP):
                yield self.configure(
                    factory.make_name("key"),
                    failover_peers,
                    shared_networks,
                    [make_host()],
                    [make_interface()],
                    make_global_dhcp_snippets(),
                )
        self.assertDocTestMatches(
            "DHCPv... server failed to restart: DHCP is on strike today",
            logger.output,
        )
Example #3
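# Tests for power.query_all_nodes(), which queries the power state of every
# queryable node and reports the results back.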
class TestPowerQueryAsync(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def setUp(self):
        super().setUp()

    def make_node(self, power_type=None):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        if power_type is None:
            power_type = random.choice(
                [
                    driver.name
                    for _, driver in PowerDriverRegistry
                    if driver.queryable
                ]
            )
        state = random.choice(["on", "off", "unknown", "error"])
        context = {
            factory.make_name("context-key"): (
                factory.make_name("context-val")
            )
        }
        return {
            "context": context,
            "hostname": hostname,
            "power_state": state,
            "power_type": power_type,
            "system_id": system_id,
        }

    def make_nodes(self, count=3):
        nodes = [self.make_node() for _ in range(count)]
        # Sanity check that these nodes are something that can emerge
        # from a call to ListNodePowerParameters.
        region.ListNodePowerParameters.makeResponse({"nodes": nodes}, None)
        return nodes

    def pick_alternate_state(self, state):
        return random.choice(
            [
                value
                for value in ["on", "off", "unknown", "error"]
                if value != state
            ]
        )
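
    # `suppress_reporting` is a module-level helper used by the tests below
    # but not shown in this excerpt. A minimal sketch, assuming it simply
    # bypasses the region report and passes each query result straight
    # through:
    #
    #   def suppress_reporting(test):
    #       report_power_state = test.patch(power, "report_power_state")
    #       report_power_state.side_effect = (
    #           lambda query, system_id, hostname: query
    #       )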

    @inlineCallbacks
    def test_query_all_nodes_gets_and_reports_power_state(self):
        nodes = self.make_nodes()

        # Report back that all nodes' power states are as recorded.
        power_states = [node["power_state"] for node in nodes]
        queries = list(map(succeed, power_states))
        get_power_state = self.patch(power, "get_power_state")
        get_power_state.side_effect = queries
        report_power_state = self.patch(power, "report_power_state")
        report_power_state.side_effect = lambda d, sid, hn: d

        yield power.query_all_nodes(nodes)
        self.assertThat(
            get_power_state,
            MockCallsMatch(
                *(
                    call(
                        node["system_id"],
                        node["hostname"],
                        node["power_type"],
                        node["context"],
                        clock=reactor,
                    )
                    for node in nodes
                )
            ),
        )
        self.assertThat(
            report_power_state,
            MockCallsMatch(
                *(
                    call(query, node["system_id"], node["hostname"])
                    for query, node in zip(queries, nodes)
                )
            ),
        )

    @inlineCallbacks
    def test_query_all_nodes_skips_nodes_in_action_registry(self):
        nodes = self.make_nodes()

        # First node is in the registry.
        power.power_action_registry[nodes[0]["system_id"]] = sentinel.action

        # Report back the power states of the nodes not in the registry.
        power_states = [node["power_state"] for node in nodes[1:]]
        get_power_state = self.patch(power, "get_power_state")
        get_power_state.side_effect = map(succeed, power_states)
        suppress_reporting(self)

        yield power.query_all_nodes(nodes)
        self.assertThat(
            get_power_state,
            MockCallsMatch(
                *(
                    call(
                        node["system_id"],
                        node["hostname"],
                        node["power_type"],
                        node["context"],
                        clock=reactor,
                    )
                    for node in nodes[1:]
                )
            ),
        )
        self.assertThat(
            get_power_state,
            Not(
                MockCalledWith(
                    nodes[0]["system_id"],
                    nodes[0]["hostname"],
                    nodes[0]["power_type"],
                    nodes[0]["context"],
                    clock=reactor,
                )
            ),
        )

    @inlineCallbacks
    def test_query_all_nodes_only_queries_queryable_power_types(self):
        nodes = self.make_nodes()
        # nodes are all queryable, so add one that isn't:
        nodes.append(self.make_node(power_type="manual"))

        # Report back that all nodes' power states are as recorded.
        power_states = [node["power_state"] for node in nodes]
        get_power_state = self.patch(power, "get_power_state")
        get_power_state.side_effect = map(succeed, power_states)
        suppress_reporting(self)

        yield power.query_all_nodes(nodes)
        self.assertThat(
            get_power_state,
            MockCallsMatch(
                *(
                    call(
                        node["system_id"],
                        node["hostname"],
                        node["power_type"],
                        node["context"],
                        clock=reactor,
                    )
                    for node in nodes
                    if node["power_type"] in PowerDriverRegistry
                )
            ),
        )

    @inlineCallbacks
    def test_query_all_nodes_swallows_PowerActionFail(self):
        node1, node2 = self.make_nodes(2)
        new_state_2 = self.pick_alternate_state(node2["power_state"])
        get_power_state = self.patch(power, "get_power_state")
        error_msg = factory.make_name("error")
        get_power_state.side_effect = [
            fail(exceptions.PowerActionFail(error_msg)),
            succeed(new_state_2),
        ]
        suppress_reporting(self)

        with FakeLogger("maas.power", level=logging.DEBUG) as maaslog:
            yield power.query_all_nodes([node1, node2])

        self.assertDocTestMatches(
            """\
            hostname-...: Could not query power state: %s.
            hostname-...: Power state has changed from ... to ...
            """
            % error_msg,
            maaslog.output,
        )

    @inlineCallbacks
    def test_query_all_nodes_swallows_PowerError(self):
        node1, node2 = self.make_nodes(2)
        new_state_2 = self.pick_alternate_state(node2["power_state"])
        get_power_state = self.patch(power, "get_power_state")
        error_msg = factory.make_name("error")
        get_power_state.side_effect = [
            fail(PowerError(error_msg)),
            succeed(new_state_2),
        ]
        suppress_reporting(self)

        with FakeLogger("maas.power", level=logging.DEBUG) as maaslog:
            yield power.query_all_nodes([node1, node2])

        self.assertDocTestMatches(
            """\
            %s: Could not query power state: %s.
            %s: Power state has changed from %s to %s.
            """
            % (
                node1["hostname"],
                error_msg,
                node2["hostname"],
                node2["power_state"],
                new_state_2,
            ),
            maaslog.output,
        )

    @inlineCallbacks
    def test_query_all_nodes_swallows_NoSuchNode(self):
        node1, node2 = self.make_nodes(2)
        new_state_2 = self.pick_alternate_state(node2["power_state"])
        get_power_state = self.patch(power, "get_power_state")
        get_power_state.side_effect = [
            fail(exceptions.NoSuchNode()),
            succeed(new_state_2),
        ]
        suppress_reporting(self)

        with FakeLogger("maas.power", level=logging.DEBUG) as maaslog:
            yield power.query_all_nodes([node1, node2])

        self.assertDocTestMatches(
            """\
            hostname-...: Power state has changed from ... to ...
            """,
            maaslog.output,
        )

    @inlineCallbacks
    def test_query_all_nodes_swallows_Exception(self):
        node1, node2 = self.make_nodes(2)
        error_message = factory.make_name("error")
        error_type = factory.make_exception_type()
        new_state_2 = self.pick_alternate_state(node2["power_state"])
        get_power_state = self.patch(power, "get_power_state")
        get_power_state.side_effect = [
            fail(error_type(error_message)),
            succeed(new_state_2),
        ]
        suppress_reporting(self)

        maaslog = FakeLogger("maas.power", level=logging.DEBUG)
        twistlog = TwistedLoggerFixture()

        with maaslog, twistlog:
            yield power.query_all_nodes([node1, node2])

        self.assertDocTestMatches(
            """\
            hostname-...: Failed to refresh power state: %s
            hostname-...: Power state has changed from ... to ...
            """
            % error_message,
            maaslog.output,
        )

    @inlineCallbacks
    def test_query_all_nodes_returns_deferredlist_of_number_of_nodes(self):
        node1, node2 = self.make_nodes(2)
        get_power_state = self.patch(power, "get_power_state")
        get_power_state.side_effect = [
            succeed(node1["power_state"]),
            succeed(node2["power_state"]),
        ]
        suppress_reporting(self)

        results = yield power.query_all_nodes([node1, node2])
        self.assertEqual(
            [(True, node1["power_state"]), (True, node2["power_state"])],
            results,
        )
Example #4
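# Tests for RackNetworksMonitoringService, which reports interfaces,
# neighbours, and mDNS entries to the region over RPC. A twisted Clock()
# stands in for the real reactor so no timers actually fire.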
class TestRackNetworksMonitoringService(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(debug=True, timeout=5)

    @inlineCallbacks
    def test_runs_refresh_first_time(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(region.RequestRackRefresh)
        self.addCleanup((yield connecting))

        rpc_service = services.getServiceNamed("rpc")
        service = RackNetworksMonitoringService(
            rpc_service,
            Clock(),
            enable_monitoring=False,
            enable_beaconing=False,
        )

        yield maybeDeferred(service.startService)
        # By stopping the interface_monitor first, we ensure that the loop
        # runs at least once before the service stops completely.
        yield maybeDeferred(service.interface_monitor.stopService)
        yield maybeDeferred(service.stopService)

        self.assertThat(
            protocol.RequestRackRefresh,
            MockCalledOnceWith(
                protocol, system_id=rpc_service.getClient().localIdent
            ),
        )

    @inlineCallbacks
    def test_reports_interfaces_to_region(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(region.UpdateInterfaces)
        self.addCleanup((yield connecting))

        interfaces = {
            "eth0": {
                "type": "physical",
                "mac_address": factory.make_mac_address(),
                "parents": [],
                "links": [],
                "enabled": True,
            }
        }

        rpc_service = services.getServiceNamed("rpc")
        service = RackNetworksMonitoringService(
            rpc_service,
            Clock(),
            enable_monitoring=False,
            enable_beaconing=False,
        )
        service.getInterfaces = lambda: succeed(interfaces)
        # Put something in the cache. This tells recordInterfaces that refresh
        # has already run, but that the interfaces have changed and therefore
        # need to be updated.
        service._recorded = {}

        service.startService()
        yield service.stopService()

        self.assertThat(
            protocol.UpdateInterfaces,
            MockCalledOnceWith(
                protocol,
                system_id=rpc_service.getClient().localIdent,
                interfaces=interfaces,
                topology_hints=None,
            ),
        )

    @inlineCallbacks
    def test_reports_interfaces_with_hints_if_beaconing_enabled(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(region.UpdateInterfaces)
        # Don't actually wait for beaconing to complete.
        pause_mock = self.patch(services_module, "pause")
        queue_mcast_mock = self.patch(
            services_module.BeaconingSocketProtocol, "queueMulticastBeaconing"
        )
        self.addCleanup((yield connecting))

        interfaces = {
            "eth0": {
                "type": "physical",
                "mac_address": factory.make_mac_address(),
                "parents": [],
                "links": [],
                "enabled": True,
            }
        }

        rpc_service = services.getServiceNamed("rpc")
        service = RackNetworksMonitoringService(
            rpc_service,
            Clock(),
            enable_monitoring=False,
            enable_beaconing=True,
        )
        service.getInterfaces = lambda: succeed(interfaces)
        # Put something in the cache. This tells recordInterfaces that refresh
        # has already run, but that the interfaces have changed and therefore
        # need to be updated.
        service._recorded = {}

        service.startService()
        yield service.stopService()

        self.assertThat(
            protocol.UpdateInterfaces,
            MockCalledOnceWith(
                protocol,
                system_id=rpc_service.getClient().localIdent,
                interfaces=interfaces,
                topology_hints=[],
            ),
        )
        # The service should have sent out beacons, waited three seconds,
        # solicited for more beacons, then waited another three seconds before
        # deciding that beaconing is complete.
        self.assertThat(pause_mock, MockCallsMatch(call(3.0), call(3.0)))
        self.assertThat(
            queue_mcast_mock,
            MockCallsMatch(
                # Called when the service starts.
                call(solicitation=True),
                # Called three seconds later.
                call(solicitation=True),
                # Not called again when the service shuts down.
            ),
        )

    @inlineCallbacks
    def test_reports_neighbours_to_region(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(
            region.UpdateInterfaces, region.ReportNeighbours
        )
        self.addCleanup((yield connecting))
        rpc_service = services.getServiceNamed("rpc")
        service = RackNetworksMonitoringService(
            rpc_service,
            Clock(),
            enable_monitoring=False,
            enable_beaconing=False,
        )
        neighbours = [{"ip": factory.make_ip_address()}]
        yield service.reportNeighbours(neighbours)
        self.assertThat(
            protocol.ReportNeighbours,
            MockCalledOnceWith(
                protocol,
                system_id=rpc_service.getClient().localIdent,
                neighbours=neighbours,
            ),
        )

    @inlineCallbacks
    def test_reports_mdns_to_region(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(
            region.UpdateInterfaces, region.ReportMDNSEntries
        )
        self.addCleanup((yield connecting))
        rpc_service = services.getServiceNamed("rpc")
        service = RackNetworksMonitoringService(
            rpc_service,
            Clock(),
            enable_monitoring=False,
            enable_beaconing=False,
        )
        mdns = [
            {
                "interface": "eth0",
                "hostname": "boggle.example.com",
                "address": factory.make_ip_address(),
            }
        ]
        yield service.reportMDNSEntries(mdns)
        self.assertThat(
            protocol.ReportMDNSEntries,
            MockCalledOnceWith(
                protocol,
                system_id=rpc_service.getClient().localIdent,
                mdns=mdns,
            ),
        )

    @inlineCallbacks
    def test_asks_region_for_monitoring_state(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(
            region.UpdateInterfaces, region.GetDiscoveryState
        )
        self.addCleanup((yield connecting))
        rpc_service = services.getServiceNamed("rpc")
        reactor = Clock()
        service = RackNetworksMonitoringService(
            rpc_service,
            reactor,
            enable_monitoring=False,
            enable_beaconing=False,
        )
        protocol.GetDiscoveryState.return_value = {"interfaces": {}}
        # Put something in the cache. This tells recordInterfaces that refresh
        # has already run, but that the interfaces have changed and therefore
        # need to be updated.
        service._recorded = {}
        yield service.startService()
        yield maybeDeferred(service.getDiscoveryState)
        yield service.stopService()
        self.assertThat(
            protocol.GetDiscoveryState,
            MockCalledOnceWith(
                protocol, system_id=rpc_service.getClient().localIdent
            ),
        )

    @inlineCallbacks
    def test_requests_beaconing_when_timer_fires(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(
            region.UpdateInterfaces, region.GetDiscoveryState
        )
        self.addCleanup((yield connecting))
        rpc_service = services.getServiceNamed("rpc")
        reactor = Clock()
        service = RackNetworksMonitoringService(
            rpc_service,
            reactor,
            enable_monitoring=False,
            enable_beaconing=True,
        )
        service.beaconing_protocol = Mock()
        service.beaconing_protocol.queueMulticastBeaconing = Mock()
        service.getInterfaces = lambda: succeed({})
        service._recorded = {}
        service.startService()
        yield service.stopService()
        self.assertThat(
            service.beaconing_protocol.queueMulticastBeaconing,
            MockCallsMatch(call(solicitation=True)),
        )
Example #5
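# These tests drive BootMethod through FakeBootMethod, a concrete test double
# (presumably defined alongside these tests) that supplies attributes such as
# bios_boot_method, bootloader_arches, and bootloader_files.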
class TestBootMethod(MAASTestCase):
    """Test for `BootMethod` in `provisioningserver.boot`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    @inlineCallbacks
    def test_get_remote_mac(self):
        remote_host = factory.make_ipv4_address()
        call_context = {
            "local": (factory.make_ipv4_address(), factory.pick_port()),
            "remote": (remote_host, factory.pick_port()),
        }

        mock_find = self.patch(boot, 'find_mac_via_arp')
        yield context.call(call_context, get_remote_mac)
        self.assertThat(mock_find, MockCalledOnceWith(remote_host))

    def test_gen_template_filenames(self):
        purpose = factory.make_name("purpose")
        arch, subarch = factory.make_names("arch", "subarch")
        expected = [
            "config.%s.%s.%s.template" % (purpose, arch, subarch),
            "config.%s.%s.template" % (purpose, arch),
            "config.%s.template" % (purpose, ),
            "config.template",
        ]
        observed = gen_template_filenames(purpose, arch, subarch)
        self.assertSequenceEqual(expected, list(observed))

    def test_get_pxe_template(self):
        method = FakeBootMethod()
        purpose = factory.make_name("purpose")
        arch, subarch = factory.make_names("arch", "subarch")
        filename = factory.make_name("filename")
        # Set up the mocks that we've patched in.
        gen_filenames = self.patch(boot, "gen_template_filenames")
        gen_filenames.return_value = [filename]
        from_filename = self.patch(tempita.Template, "from_filename")
        from_filename.return_value = mock.sentinel.template
        # The template returned matches the return value above.
        template = method.get_template(purpose, arch, subarch)
        self.assertEqual(mock.sentinel.template, template)
        # gen_template_filenames is called to obtain candidate filenames.
        gen_filenames.assert_called_once_with(purpose, arch, subarch)
        # Template.from_filename is called with an absolute path derived from
        # the filename returned by gen_template_filenames.
        from_filename.assert_called_once_with(
            os.path.join(method.get_template_dir(), filename),
            encoding="UTF-8",
        )

    def test_get_template_gets_default_if_available(self):
        # If there is no template matching the purpose, arch, and subarch,
        # but there is a completely generic template, then get_template()
        # falls back to that as the default.
        templates_dir = self.make_dir()
        method = FakeBootMethod()
        method.get_template_dir = lambda: templates_dir
        generic_template = factory.make_file(templates_dir, 'config.template')
        purpose = factory.make_name("purpose")
        arch, subarch = factory.make_names("arch", "subarch")
        self.assertEqual(generic_template,
                         method.get_template(purpose, arch, subarch).name)

    def test_get_template_not_found(self):
        mock_try_send_rack_event = self.patch(boot, 'try_send_rack_event')
        # It is a critical and unrecoverable error if the default template
        # is not found.
        templates_dir = self.make_dir()
        method = FakeBootMethod()
        method.get_template_dir = lambda: templates_dir
        self.assertRaises(AssertionError, method.get_template,
                          *factory.make_names("purpose", "arch", "subarch"))
        self.assertThat(mock_try_send_rack_event, MockCalledOnce())

    def test_get_templates_only_suppresses_ENOENT(self):
        # The IOError arising from trying to load a template that doesn't
        # exist is suppressed, but other errors are not.
        method = FakeBootMethod()
        from_filename = self.patch(tempita.Template, "from_filename")
        from_filename.side_effect = IOError()
        from_filename.side_effect.errno = errno.EACCES
        self.assertRaises(IOError, method.get_template,
                          *factory.make_names("purpose", "arch", "subarch"))

    def test_link_bootloader_links_simplestream_bootloader_files(self):
        method = FakeBootMethod()
        with tempdir() as tmp:
            stream_path = os.path.join(tmp, 'bootloader',
                                       method.bios_boot_method,
                                       method.bootloader_arches[0])
            os.makedirs(stream_path)
            for bootloader_file in method.bootloader_files:
                factory.make_file(stream_path, bootloader_file)

            method.link_bootloader(tmp)

            for bootloader_file in method.bootloader_files:
                bootloader_file_path = os.path.join(tmp, bootloader_file)
                self.assertTrue(os.path.islink(bootloader_file_path))

    def test_link_bootloader_logs_missing_simplestream_file(self):
        method = FakeBootMethod()
        mock_maaslog = self.patch(maaslog, 'error')
        mock_try_send_rack_event = self.patch(boot, 'try_send_rack_event')
        with tempdir() as tmp:
            stream_path = os.path.join(tmp, 'bootloader',
                                       method.bios_boot_method,
                                       method.bootloader_arches[0])
            os.makedirs(stream_path)
            for bootloader_file in method.bootloader_files[1:]:
                factory.make_file(stream_path, bootloader_file)

            method.link_bootloader(tmp)

            self.assertThat(mock_maaslog, MockCalledOnce())
            self.assertThat(mock_try_send_rack_event, MockCalledOnce())

    def test_link_bootloader_copies_previous_downloaded_files(self):
        method = FakeBootMethod()
        with tempdir() as tmp:
            new_dir = os.path.join(tmp, 'new')
            current_dir = os.path.join(tmp, 'current')
            os.makedirs(new_dir)
            os.makedirs(current_dir)
            for bootloader_file in method.bootloader_files:
                factory.make_file(current_dir, bootloader_file)

            method.link_bootloader(new_dir)

            for bootloader_file in method.bootloader_files:
                bootloader_file_path = os.path.join(new_dir, bootloader_file)
                self.assertTrue(os.path.isfile(bootloader_file_path))

    def test_link_bootloader_links_bootloaders_found_elsewhere_on_fs(self):
        method = FakeBootMethod()
        with tempdir() as tmp:
            bootresources_dir = os.path.join(tmp, 'boot-resources')
            new_dir = os.path.join(bootresources_dir, 'new')
            current_dir = os.path.join(bootresources_dir, 'current')
            os.makedirs(new_dir)
            os.makedirs(current_dir)
            for bootloader_file in method.bootloader_files:
                factory.make_file(tmp, bootloader_file)
                atomic_symlink(os.path.join(tmp, bootloader_file),
                               os.path.join(current_dir, bootloader_file))

            method.link_bootloader(new_dir)

            for bootloader_file in method.bootloader_files:
                bootloader_file_path = os.path.join(new_dir, bootloader_file)
                self.assertTrue(os.path.islink(bootloader_file_path))

    def test_link_bootloader_logs_missing_previous_downloaded_files(self):
        method = FakeBootMethod()
        mock_maaslog = self.patch(maaslog, 'error')
        mock_try_send_rack_event = self.patch(boot, 'try_send_rack_event')
        with tempdir() as tmp:
            new_dir = os.path.join(tmp, 'new')
            current_dir = os.path.join(tmp, 'current')
            os.makedirs(new_dir)
            os.makedirs(current_dir)
            for bootloader_file in method.bootloader_files[1:]:
                factory.make_file(current_dir, bootloader_file)

            method.link_bootloader(new_dir)

            self.assertThat(mock_maaslog, MockCalledOnce())
            self.assertThat(mock_try_send_rack_event, MockCalledOnce())

    def test_compose_template_namespace(self):
        kernel_params = make_kernel_parameters()
        method = FakeBootMethod()
        image_dir = compose_image_path(kernel_params.osystem,
                                       kernel_params.arch,
                                       kernel_params.subarch,
                                       kernel_params.release,
                                       kernel_params.label)

        template_namespace = method.compose_template_namespace(kernel_params)

        self.assertEqual("%s/%s" % (image_dir, kernel_params.initrd),
                         template_namespace['initrd_path'](kernel_params))
        self.assertEqual(compose_kernel_command_line(kernel_params),
                         template_namespace['kernel_command'](kernel_params))
        self.assertEqual(kernel_params, template_namespace['kernel_params'])
        self.assertEqual("%s/%s" % (image_dir, kernel_params.kernel),
                         template_namespace['kernel_path'](kernel_params))
        self.assertIsNone(template_namespace['dtb_path'](kernel_params))

    def test_compose_template_namespace_returns_filetype_when_missing(self):
        kernel_params = make_kernel_parameters(subarch='xgene-uboot-mustang',
                                               kernel=None,
                                               initrd=None,
                                               boot_dtb=None)
        method = FakeBootMethod()
        image_dir = compose_image_path(kernel_params.osystem,
                                       kernel_params.arch,
                                       kernel_params.subarch,
                                       kernel_params.release,
                                       kernel_params.label)

        template_namespace = method.compose_template_namespace(kernel_params)

        self.assertEqual("%s/boot-initrd" % image_dir,
                         template_namespace['initrd_path'](kernel_params))
        self.assertEqual(compose_kernel_command_line(kernel_params),
                         template_namespace['kernel_command'](kernel_params))
        self.assertEqual(kernel_params, template_namespace['kernel_params'])
        self.assertEqual("%s/boot-kernel" % image_dir,
                         template_namespace['kernel_path'](kernel_params))
        self.assertEqual("%s/boot-dtb" % image_dir,
                         template_namespace['dtb_path'](kernel_params))

    def test_compose_template_namespace_returns_dtb_file_when_arm(self):
        kernel_params = make_kernel_parameters(subarch='xgene-uboot-mustang')
        method = FakeBootMethod()
        image_dir = compose_image_path(kernel_params.osystem,
                                       kernel_params.arch,
                                       kernel_params.subarch,
                                       kernel_params.release,
                                       kernel_params.label)

        template_namespace = method.compose_template_namespace(kernel_params)

        self.assertEqual("%s/%s" % (image_dir, kernel_params.initrd),
                         template_namespace['initrd_path'](kernel_params))
        self.assertEqual(compose_kernel_command_line(kernel_params),
                         template_namespace['kernel_command'](kernel_params))
        self.assertEqual(kernel_params, template_namespace['kernel_params'])
        self.assertEqual("%s/%s" % (image_dir, kernel_params.kernel),
                         template_namespace['kernel_path'](kernel_params))
        self.assertEqual("%s/%s" % (image_dir, kernel_params.boot_dtb),
                         template_namespace['dtb_path'](kernel_params))
Example #6
class TestVirsh(MAASTestCase):
    """Tests for `probe_virsh_and_enlist`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def _probe_and_enlist_mock_run(self, *args):
        args = args[0]
        # if the argument is "define", we want to ensure that the boot
        # order has been set up correctly.
        if args[0] == "define":
            xml_file = args[1]
            with open(xml_file) as f:
                xml = f.read()
                doc = etree.XML(xml)
                evaluator = etree.XPathEvaluator(doc)
                boot_elements = evaluator(virsh.XPATH_BOOT)
                self.assertEqual(2, len(boot_elements))
                # make sure we set the network to come first, then the HD
                self.assertEqual('network', boot_elements[0].attrib['dev'])
                self.assertEqual('hd', boot_elements[1].attrib['dev'])
        return ""

    @inlineCallbacks
    def test_probe_and_enlist(self):
        # Patch VirshSSH list so that some machines are returned
        # with some fake architectures.
        user = factory.make_name('user')
        system_id = factory.make_name('system_id')
        machines = [factory.make_name('machine') for _ in range(5)]
        self.patch(virsh.VirshSSH, 'list').return_value = machines
        fake_arch = factory.make_name('arch')
        mock_arch = self.patch(virsh.VirshSSH, 'get_arch')
        mock_arch.return_value = fake_arch
        domain = factory.make_name('domain')

        # Patch get_state so that one of the machines is on, so we
        # can check that it will be forced off.
        fake_states = [
            virsh.VirshVMState.ON,
            virsh.VirshVMState.OFF,
            virsh.VirshVMState.OFF,
            virsh.VirshVMState.ON,
            virsh.VirshVMState.ON,
        ]
        mock_state = self.patch(virsh.VirshSSH, 'get_state')
        mock_state.side_effect = fake_states

        # Set up the power parameters that we expect to see in the
        # output of probe_and_enlist.
        fake_password = factory.make_string()
        poweraddr = factory.make_name('poweraddr')
        called_params = []
        fake_macs = []
        for machine in machines:
            macs = [factory.make_mac_address() for _ in range(4)]
            fake_macs.append(macs)
            called_params.append({
                'power_address': poweraddr,
                'power_id': machine,
                'power_pass': fake_password,
            })

        # Patch the get_mac_addresses so we get a known list of
        # mac addresses for each machine.
        mock_macs = self.patch(virsh.VirshSSH, 'get_mac_addresses')
        mock_macs.side_effect = fake_macs

        # Patch poweroff and create_node so these actions don't actually
        # occur, while still letting us check that they are called.
        mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff')
        mock_create_node = self.patch(virsh, 'create_node')
        mock_create_node.side_effect = asynchronous(
            lambda *args, **kwargs: None if machines[4] in args else system_id)
        mock_commission_node = self.patch(virsh, 'commission_node')

        # Patch login and logout so that we don't actually contact a
        # server at the fake poweraddr.
        mock_login = self.patch(virsh.VirshSSH, 'login')
        mock_login.return_value = True
        mock_logout = self.patch(virsh.VirshSSH, 'logout')
        mock_get_machine_xml = self.patch(virsh.VirshSSH, 'get_machine_xml')
        mock_get_machine_xml.side_effect = [
            SAMPLE_DUMPXML,
            SAMPLE_DUMPXML_2,
            SAMPLE_DUMPXML_3,
            SAMPLE_DUMPXML_4,
            SAMPLE_DUMPXML,
        ]

        mock_run = self.patch(virsh.VirshSSH, 'run')
        mock_run.side_effect = self._probe_and_enlist_mock_run

        # Perform the probe and enlist
        yield deferToThread(virsh.probe_virsh_and_enlist,
                            user,
                            poweraddr,
                            fake_password,
                            accept_all=True,
                            domain=domain)

        # Check that login was called with the provided poweraddr and
        # password.
        self.expectThat(mock_login, MockCalledOnceWith(poweraddr,
                                                       fake_password))

        # Check that the create command had the correct parameters for
        # each machine.
        self.expectThat(
            mock_create_node,
            MockCallsMatch(
                call(fake_macs[0], fake_arch, 'virsh', called_params[0],
                     domain, machines[0]),
                call(fake_macs[1], fake_arch, 'virsh', called_params[1],
                     domain, machines[1]),
                call(fake_macs[2], fake_arch, 'virsh', called_params[2],
                     domain, machines[2]),
                call(fake_macs[3], fake_arch, 'virsh', called_params[3],
                     domain, machines[3]),
                call(fake_macs[4], fake_arch, 'virsh', called_params[4],
                     domain, machines[4]),
            ))

        # The first and fourth machines should have poweroff called on
        # them, as they were initially in the on state and were
        # successfully created.
        self.expectThat(mock_poweroff,
                        MockCallsMatch(
                            call(machines[0]),
                            call(machines[3]),
                        ))

        self.assertThat(mock_logout, MockCalledOnceWith())
        self.expectThat(
            mock_commission_node,
            MockCallsMatch(
                call(system_id, user),
                call(system_id, user),
                call(system_id, user),
                call(system_id, user),
            ))

    @inlineCallbacks
    def test_probe_and_enlist_login_failure(self):
        user = factory.make_name('user')
        poweraddr = factory.make_name('poweraddr')
        mock_login = self.patch(virsh.VirshSSH, 'login')
        mock_login.return_value = False
        with ExpectedException(virsh.VirshError):
            yield deferToThread(virsh.probe_virsh_and_enlist,
                                user,
                                poweraddr,
                                password=factory.make_string())
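The _probe_and_enlist_mock_run helper above inspects the XML handed to "virsh define" to confirm that the network device boots before the disk. A self-contained version of that check, assuming virsh.XPATH_BOOT is an expression along the lines of "/domain/os/boot" (the actual constant in MAAS may differ):

from lxml import etree

XPATH_BOOT = "/domain/os/boot"  # assumed value of virsh.XPATH_BOOT

sample_xml = b"""
<domain type="kvm">
  <os>
    <boot dev="network"/>
    <boot dev="hd"/>
  </os>
</domain>
"""

doc = etree.XML(sample_xml)
boot_elements = etree.XPathEvaluator(doc)(XPATH_BOOT)
# Network boot must come first so the machine PXE boots, then the disk.
assert [el.attrib["dev"] for el in boot_elements] == ["network", "hd"]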
Example #7
class TestLeaseSocketService(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def setUp(self):
        super().setUp()
        self.patch(clusterservice,
                   "get_all_interfaces_definition").return_value = {}

    def patch_socket_path(self):
        path = self.make_dir()
        socket_path = os.path.join(path, "dhcpd.sock")
        self.patch(lease_socket_service,
                   "get_socket_path").return_value = socket_path
        return socket_path

    def patch_rpc_UpdateLease(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(UpdateLease)
        return protocol, connecting

    def send_notification(self, socket_path, payload):
        conn = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        conn.connect(socket_path)
        conn.send(json.dumps(payload).encode("utf-8"))
        conn.close()

    def test_init(self):
        socket_path = self.patch_socket_path()
        service = LeaseSocketService(sentinel.service, sentinel.reactor)
        self.assertIsInstance(service, Service)
        self.assertIsInstance(service, DatagramProtocol)
        self.assertIs(service.reactor, sentinel.reactor)
        self.assertIs(service.client_service, sentinel.service)
        self.assertEqual(socket_path, service.address)

    def test_startService_creates_socket(self):
        socket_path = self.patch_socket_path()
        service = LeaseSocketService(sentinel.service, reactor)
        service.startService()
        self.addCleanup(service.stopService)
        self.assertThat(socket_path, PathExists())

    @defer.inlineCallbacks
    def test_stopService_deletes_socket(self):
        socket_path = self.patch_socket_path()
        service = LeaseSocketService(sentinel.service, reactor)
        service.startService()
        yield service.stopService()
        self.assertThat(socket_path, Not(PathExists()))

    @defer.inlineCallbacks
    def test_notification_gets_added_to_notifications(self):
        socket_path = self.patch_socket_path()
        service = LeaseSocketService(sentinel.service, reactor)
        service.startService()
        self.addCleanup(service.stopService)

        # Stop the looping call to check that the notification gets added
        # to notifications.
        process_done = service.done
        service.processor.stop()
        yield process_done
        service.processor = MagicMock()

        # Create test payload to send.
        packet = {"test": factory.make_name("test")}

        # A notification sent to the socket should appear in notifications.
        yield deferToThread(self.send_notification, socket_path, packet)

        # Loop until service.notifications has at least one notification.
        for elapsed, remaining, wait in retries(5, 0.1, reactor):
            if len(service.notifications) > 0:
                break
            else:
                yield pause(wait, reactor)

        # Should have one notification.
        self.assertEqual([packet], list(service.notifications))

    @defer.inlineCallbacks
    def test_processNotification_gets_called_with_notification(self):
        socket_path = self.patch_socket_path()
        service = LeaseSocketService(sentinel.service, reactor)
        dv = DeferredValue()

        # Mock processNotification to catch the call.
        def mock_processNotification(*args, **kwargs):
            dv.set(args)

        self.patch(service, "processNotification", mock_processNotification)

        # Start the service and stop it at the end of the test.
        service.startService()
        self.addCleanup(service.stopService)

        # Create test payload to send.
        packet = {"test": factory.make_name("test")}

        # Send notification to the socket and wait for notification.
        yield deferToThread(self.send_notification, socket_path, packet)
        yield dv.get(timeout=10)

        # The packet should be the argument passed to processNotification.
        self.assertEqual((packet, ), dv.value)

    @defer.inlineCallbacks
    def test_processNotification_gets_called_multiple_times(self):
        socket_path = self.patch_socket_path()
        service = LeaseSocketService(sentinel.service, reactor)
        dvs = [DeferredValue(), DeferredValue()]

        # Mock processNotification to catch the call.
        def mock_processNotification(*args, **kwargs):
            for dv in dvs:
                if not dv.isSet:
                    dv.set(args)
                    break

        self.patch(service, "processNotification", mock_processNotification)

        # Start the service and stop it at the end of the test.
        service.startService()
        self.addCleanup(service.stopService)

        # Create test payload to send.
        packet1 = {"test1": factory.make_name("test1")}
        packet2 = {"test2": factory.make_name("test2")}

        # Send notifications to the socket and wait for notifications.
        yield deferToThread(self.send_notification, socket_path, packet1)
        yield deferToThread(self.send_notification, socket_path, packet2)
        yield dvs[0].get(timeout=10)
        yield dvs[1].get(timeout=10)

        # The packets should be the arguments passed to processNotification,
        # in order.
        self.assertEqual((packet1, ), dvs[0].value)
        self.assertEqual((packet2, ), dvs[1].value)

    @defer.inlineCallbacks
    def test_processNotification_send_to_region(self):
        protocol, connecting = self.patch_rpc_UpdateLease()
        self.addCleanup((yield connecting))

        client = getRegionClient()
        rpc_service = MagicMock()
        rpc_service.getClientNow.return_value = defer.succeed(client)
        service = LeaseSocketService(rpc_service, reactor)

        # Notification to region.
        packet = {
            "action": "commit",
            "mac": factory.make_mac_address(),
            "ip_family": "ipv4",
            "ip": factory.make_ipv4_address(),
            "timestamp": int(time.time()),
            "lease_time": 30,
            "hostname": factory.make_name("host"),
        }
        yield service.processNotification(packet, clock=reactor)
        self.assertThat(
            protocol.UpdateLease,
            MockCalledOnceWith(
                protocol,
                cluster_uuid=client.localIdent,
                action=packet["action"],
                mac=packet["mac"],
                ip_family=packet["ip_family"],
                ip=packet["ip"],
                timestamp=packet["timestamp"],
                lease_time=packet["lease_time"],
                hostname=packet["hostname"],
            ),
        )
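The send_notification helper above mirrors how dhcpd hands lease updates to the rack: one JSON document per datagram on a Unix socket. Below is a minimal sketch of the receiving side, inferred from these tests rather than taken from the MAAS source; the real LeaseSocketService obtains its socket path from the patched get_socket_path() helper and also runs a looping call that drains notifications through processNotification, while this sketch takes the path explicitly to stay self-contained.

import json
import os
from collections import deque

from twisted.application.service import Service
from twisted.internet.protocol import DatagramProtocol


class LeaseSocketServiceSketch(Service, DatagramProtocol):
    """Sketch only: queue JSON lease notifications from a Unix socket."""

    def __init__(self, client_service, reactor, address):
        self.client_service = client_service
        self.reactor = reactor
        self.address = address  # e.g. ".../dhcpd.sock"
        self.notifications = deque()

    def startService(self):
        super().startService()
        self.port = self.reactor.listenUNIXDatagram(self.address, self)

    def stopService(self):
        self.port.stopListening()
        os.remove(self.address)
        return super().stopService()

    def datagramReceived(self, data, addr):
        # Each datagram is a JSON-encoded lease notification from dhcpd.
        self.notifications.append(json.loads(data.decode("utf-8")))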
Example #8
class TestRedfishPowerDriver(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def test_missing_packages(self):
        # there's nothing to check for, just confirm it returns []
        driver = RedfishPowerDriver()
        missing = driver.detect_missing_packages()
        self.assertItemsEqual([], missing)

    def test_get_url_with_ip(self):
        driver = RedfishPowerDriver()
        context = make_context()
        ip = context.get('power_address').encode('utf-8')
        expected_url = b"https://%s" % ip
        url = driver.get_url(context)
        self.assertEqual(expected_url, url)

    def test_get_url_with_https(self):
        driver = RedfishPowerDriver()
        context = make_context()
        context['power_address'] = join(
            "https://", context['power_address'])
        expected_url = context.get('power_address').encode('utf-8')
        url = driver.get_url(context)
        self.assertEqual(expected_url, url)

    def test_get_url_with_http(self):
        driver = RedfishPowerDriver()
        context = make_context()
        context['power_address'] = join(
            "http://", context['power_address'])
        expected_url = context.get('power_address').encode('utf-8')
        url = driver.get_url(context)
        self.assertEqual(expected_url, url)

    def test__make_auth_headers(self):
        power_user = factory.make_name('power_user')
        power_pass = factory.make_name('power_pass')
        creds = "%s:%s" % (power_user, power_pass)
        authorization = b64encode(creds.encode('utf-8'))
        attributes = {
            b"User-Agent": [b"MAAS"],
            b"Authorization": [b"Basic " + authorization],
            b"Content-Type": [b"application/json; charset=utf-8"],
        }
        driver = RedfishPowerDriver()
        headers = driver.make_auth_headers(power_user, power_pass)
        self.assertEquals(headers, Headers(attributes))

    @inlineCallbacks
    def test_redfish_request_renders_response(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, 'Agent')
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.OK
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, 'readBody')
        mock_readBody.return_value = succeed(
            json.dumps(SAMPLE_JSON_SYSTEMS).encode('utf-8'))
        expected_response = SAMPLE_JSON_SYSTEMS

        response, headers = yield driver.redfish_request(b"GET", uri, headers)
        self.assertEquals(expected_response, response)
        self.assertEquals(expected_headers.headers, headers)

    @inlineCallbacks
    def test_redfish_request_continues_partial_download_error(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, 'Agent')
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.OK
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, 'readBody')
        error = PartialDownloadError(
            response=json.dumps(SAMPLE_JSON_SYSTEMS).encode('utf-8'),
            code=HTTPStatus.OK)
        mock_readBody.return_value = fail(error)
        expected_response = SAMPLE_JSON_SYSTEMS

        response, headers = yield driver.redfish_request(b"GET", uri, headers)
        self.assertEquals(expected_response, response)
        self.assertEquals(expected_headers.headers, headers)

    @inlineCallbacks
    def test_redfish_request_raises_failures(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, 'Agent')
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.OK
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, 'readBody')
        error = PartialDownloadError(
            response=json.dumps(SAMPLE_JSON_SYSTEMS).encode('utf-8'),
            code=HTTPStatus.NOT_FOUND)
        mock_readBody.return_value = fail(error)

        with ExpectedException(PartialDownloadError):
            yield driver.redfish_request(b"GET", uri, headers)
        self.assertThat(mock_readBody, MockCalledOnceWith(
            expected_headers))

    @inlineCallbacks
    def test_redfish_request_raises_error_on_response_code_above_400(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, 'Agent')
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.BAD_REQUEST
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, 'readBody')

        with ExpectedException(PowerActionError):
            yield driver.redfish_request(b"GET", uri, headers)
        self.assertThat(mock_readBody, MockNotCalled())

    @inlineCallbacks
    def test_power_issues_power_reset(self):
        driver = RedfishPowerDriver()
        context = make_context()
        power_change = factory.make_name('power_change')
        url = driver.get_url(context)
        headers = driver.make_auth_headers(**context)
        node_id = b'1'
        mock_file_body_producer = self.patch(
            redfish_module, 'FileBodyProducer')
        payload = FileBodyProducer(
            BytesIO(
                json.dumps(
                    {
                        'ResetType': "%s" % power_change
                    }).encode('utf-8')))
        mock_file_body_producer.return_value = payload
        mock_redfish_request = self.patch(driver, 'redfish_request')
        expected_uri = join(
            url, REDFISH_POWER_CONTROL_ENDPOINT % node_id)
        yield driver.power(power_change, url, node_id, headers)
        self.assertThat(mock_redfish_request, MockCalledOnceWith(
            b"POST", expected_uri, headers, payload))

    @inlineCallbacks
    def test__set_pxe_boot(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        node_id = b'1'
        headers = driver.make_auth_headers(**context)
        mock_file_body_producer = self.patch(
            redfish_module, 'FileBodyProducer')
        payload = FileBodyProducer(
            BytesIO(
                json.dumps(
                    {
                        'Boot': {
                            'BootSourceOverrideEnabled': "Once",
                            'BootSourceOverrideTarget': "Pxe"
                        }
                    }).encode('utf-8')))
        mock_file_body_producer.return_value = payload
        mock_redfish_request = self.patch(driver, 'redfish_request')

        yield driver.set_pxe_boot(url, node_id, headers)
        self.assertThat(mock_redfish_request, MockCalledOnceWith(
            b"PATCH", join(url, b"redfish/v1/Systems/%s/" % node_id),
            headers, payload))

    @inlineCallbacks
    def test__power_on(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        headers = driver.make_auth_headers(**context)
        node_id = b'1'
        mock_redfish_request = self.patch(driver, 'redfish_request')
        mock_redfish_request.return_value = (
            SAMPLE_JSON_SYSTEMS, None)
        mock_set_pxe_boot = self.patch(driver, 'set_pxe_boot')
        mock_power_query = self.patch(driver, 'power_query')
        mock_power_query.return_value = "on"
        mock_power = self.patch(driver, 'power')

        yield driver.power_on(node_id, context)
        self.assertThat(mock_set_pxe_boot, MockCalledOnceWith(
            url, node_id, headers))
        self.assertThat(mock_power_query, MockCalledOnceWith(
            node_id, context))
        self.assertThat(mock_power, MockCallsMatch(
            call("ForceOff", url, node_id, headers),
            call("On", url, node_id, headers)))

    @inlineCallbacks
    def test__power_off(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        headers = driver.make_auth_headers(**context)
        node_id = b'1'
        mock_redfish_request = self.patch(driver, 'redfish_request')
        mock_redfish_request.return_value = (
            SAMPLE_JSON_SYSTEMS, None)
        mock_set_pxe_boot = self.patch(driver, 'set_pxe_boot')
        mock_power = self.patch(driver, 'power')

        yield driver.power_off(node_id, context)
        self.assertThat(mock_set_pxe_boot, MockCalledOnceWith(
            url, node_id, headers))
        self.assertThat(mock_power, MockCalledOnceWith(
            "ForceOff", url, node_id, headers))

    @inlineCallbacks
    def test_power_query_queries_on(self):
        driver = RedfishPowerDriver()
        power_change = "On"
        system_id = factory.make_name('system_id')
        context = make_context()
        mock_redfish_request = self.patch(driver, 'redfish_request')
        NODE_POWERED_ON = deepcopy(SAMPLE_JSON_SYSTEM)
        NODE_POWERED_ON['PowerState'] = "On"
        mock_redfish_request.side_effect = [
            (SAMPLE_JSON_SYSTEMS, None),
            (NODE_POWERED_ON, None),
        ]
        power_state = yield driver.power_query(system_id, context)
        self.assertEquals(power_state, power_change.lower())

    @inlineCallbacks
    def test_power_query_queries_off(self):
        driver = RedfishPowerDriver()
        power_change = "Off"
        system_id = factory.make_name('system_id')
        context = make_context()
        mock_redfish_request = self.patch(driver, 'redfish_request')
        mock_redfish_request.side_effect = [
            (SAMPLE_JSON_SYSTEMS, None),
            (SAMPLE_JSON_SYSTEM, None),
        ]
        power_state = yield driver.power_query(system_id, context)
        self.assertEquals(power_state, power_change.lower())
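test__make_auth_headers above spells out the exact header set the driver builds, so the construction can be reproduced outside the test. A small sketch of that construction (the method under test is RedfishPowerDriver.make_auth_headers):

from base64 import b64encode

from twisted.web.http_headers import Headers


def make_auth_headers_sketch(power_user, power_pass):
    # HTTP Basic auth: base64("user:pass"), plus the JSON content type the
    # Redfish API expects.
    creds = "%s:%s" % (power_user, power_pass)
    authorization = b64encode(creds.encode("utf-8"))
    return Headers({
        b"User-Agent": [b"MAAS"],
        b"Authorization": [b"Basic " + authorization],
        b"Content-Type": [b"application/json; charset=utf-8"],
    })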
Example #9
class TestComposeMachine(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def make_requested_machine(self):
        return RequestedMachine(
            hostname=factory.make_name("hostname"),
            architecture="amd64/generic",
            cores=random.randint(1, 8),
            cpu_speed=random.randint(1000, 3000),
            memory=random.randint(1024, 8192),
            block_devices=[
                RequestedMachineBlockDevice(size=random.randint(8, 16))
            ],
            interfaces=[RequestedMachineInterface()],
        )

    @inlineCallbacks
    def test_unknown_pod_raises_UnknownPodType(self):
        unknown_type = factory.make_name("unknown")
        fake_request = self.make_requested_machine()
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        with ExpectedException(exceptions.UnknownPodType):
            yield pods.compose_machine(
                unknown_type, {}, fake_request, pod_id=pod_id, name=pod_name
            )

    @inlineCallbacks
    def test_handles_driver_not_returning_Deferred(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.compose.return_value = None
        fake_request = self.make_requested_machine()
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                "bad pod driver '%s'; 'compose' did not "
                "return Deferred." % fake_driver.name
            ),
        ):
            yield pods.compose_machine(
                fake_driver.name,
                {},
                fake_request,
                pod_id=pod_id,
                name=pod_name,
            )

    @inlineCallbacks
    def test_handles_driver_resolving_to_None(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.compose.return_value = succeed(None)
        fake_request = self.make_requested_machine()
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(exceptions.PodInvalidResources):
            yield pods.compose_machine(
                fake_driver.name,
                {},
                fake_request,
                pod_id=pod_id,
                name=pod_name,
            )

    @inlineCallbacks
    def test_handles_driver_not_resolving_to_tuple(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.compose.return_value = succeed({})
        fake_request = self.make_requested_machine()
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                "bad pod driver '%s'; 'compose' returned "
                "invalid result." % fake_driver.name
            ),
        ):
            yield pods.compose_machine(
                fake_driver.name,
                {},
                fake_request,
                pod_id=pod_id,
                name=pod_name,
            )

    @inlineCallbacks
    def test_handles_driver_not_resolving_to_tuple_of_discovered(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.compose.return_value = succeed((object(), object()))
        fake_request = self.make_requested_machine()
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                "bad pod driver '%s'; 'compose' returned "
                "invalid result." % fake_driver.name
            ),
        ):
            yield pods.compose_machine(
                fake_driver.name,
                {},
                fake_request,
                pod_id=pod_id,
                name=pod_name,
            )

    @inlineCallbacks
    def test_handles_driver_resolving_to_tuple_of_discovered(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_request = self.make_requested_machine()
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        machine = DiscoveredMachine(
            hostname=factory.make_name("hostname"),
            architecture="amd64/generic",
            cores=random.randint(1, 8),
            cpu_speed=random.randint(1000, 3000),
            memory=random.randint(1024, 8192),
            block_devices=[],
            interfaces=[],
        )
        hints = DiscoveredPodHints(
            cores=random.randint(1, 8),
            cpu_speed=random.randint(1000, 2000),
            memory=random.randint(1024, 8192),
            local_storage=0,
        )
        fake_driver.compose.return_value = succeed((machine, hints))
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        result = yield pods.compose_machine(
            fake_driver.name, {}, fake_request, pod_id=pod_id, name=pod_name
        )
        self.assertEqual({"machine": machine, "hints": hints}, result)

    @inlineCallbacks
    def test_handles_driver_raising_NotImplementedError(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.compose.return_value = fail(NotImplementedError())
        fake_request = self.make_requested_machine()
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(NotImplementedError):
            yield pods.compose_machine(
                fake_driver.name,
                {},
                fake_request,
                pod_id=pod_id,
                name=pod_name,
            )

    @inlineCallbacks
    def test_handles_driver_raising_any_Exception(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_exception_type = factory.make_exception_type()
        fake_exception_msg = factory.make_name("error")
        fake_exception = fake_exception_type(fake_exception_msg)
        fake_driver.compose.return_value = fail(fake_exception)
        fake_request = self.make_requested_machine()
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape("Failed talking to pod: " + fake_exception_msg),
        ):
            yield pods.compose_machine(
                fake_driver.name,
                {},
                fake_request,
                pod_id=pod_id,
                name=pod_name,
            )
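Taken together, the error-handling tests above imply the shape of the validation compose_machine applies to whatever the pod driver's compose() resolves to. A sketch of that validation, inferred from the tests rather than copied from MAAS (exceptions, DiscoveredMachine and DiscoveredPodHints are the same names the tests use and are assumed to be in scope):

def check_compose_result(result, driver_name):
    # None means the pod could not satisfy the request.
    if result is None:
        raise exceptions.PodInvalidResources()
    # Anything other than a (DiscoveredMachine, DiscoveredPodHints) pair is
    # treated as a broken driver.
    if not isinstance(result, tuple) or len(result) != 2:
        raise exceptions.PodActionFail(
            "bad pod driver '%s'; 'compose' returned invalid result."
            % driver_name)
    machine, hints = result
    if not isinstance(machine, DiscoveredMachine) or not isinstance(
            hints, DiscoveredPodHints):
        raise exceptions.PodActionFail(
            "bad pod driver '%s'; 'compose' returned invalid result."
            % driver_name)
    return {"machine": machine, "hints": hints}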
Example #10
class TestNetworksMonitoringService(MAASTestCase):
    """Tests of `NetworksMonitoringService`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def makeService(self, *args, **kwargs):
        service = StubNetworksMonitoringService(*args, **kwargs)
        self.addCleanup(service._releaseSoleResponsibility)
        return service

    def test_init(self):
        service = self.makeService()
        self.assertThat(service, IsInstance(MultiService))
        self.assertThat(service.interface_monitor.step,
                        Equals(service.interval))
        self.assertThat(
            service.interface_monitor.call,
            Equals((service.updateInterfaces, (), {})),
        )

    @inlineCallbacks
    def test_get_all_interfaces_definition_is_called_in_thread(self):
        service = self.makeService()
        self.patch(services, "get_all_interfaces_definition",
                   threading.current_thread)
        yield service.updateInterfaces()
        self.assertThat(service.interfaces, HasLength(1))
        [thread] = service.interfaces
        self.assertThat(thread, IsInstance(threading.Thread))
        self.assertThat(thread, Not(Equals(threadable.ioThread)))

    @inlineCallbacks
    def test_getInterfaces_called_to_get_configuration(self):
        service = self.makeService()
        getInterfaces = self.patch(service, "getInterfaces")
        getInterfaces.return_value = succeed(sentinel.config)
        yield service.updateInterfaces()
        self.assertThat(service.interfaces, Equals([sentinel.config]))

    @inlineCallbacks
    def test_logs_errors(self):
        service = self.makeService()
        with TwistedLoggerFixture() as logger:
            error_message = factory.make_string()
            get_interfaces = self.patch(services,
                                        "get_all_interfaces_definition")
            get_interfaces.side_effect = Exception(error_message)
            yield service.updateInterfaces()
        self.assertThat(
            logger.output,
            DocTestMatches(
                "Failed to update and/or record network interface configuration"
                "..."),
        )

    @inlineCallbacks
    def test_starting_service_triggers_interface_update(self):
        get_interfaces = self.patch(services, "get_all_interfaces_definition")
        get_interfaces.side_effect = [sentinel.config]
        clock = Clock()
        service = self.makeService(clock=clock)
        yield service.startService()
        self.assertThat(service.update_interface__calls, Equals(1))
        yield service.stopService()

    @inlineCallbacks
    def test_recordInterfaces_called_when_nothing_previously_recorded(self):
        get_interfaces = self.patch(services, "get_all_interfaces_definition")
        get_interfaces.side_effect = [sentinel.config]

        service = self.makeService()
        self.assertThat(service.interfaces, Equals([]))
        yield service.updateInterfaces()
        self.assertThat(service.interfaces, Equals([sentinel.config]))

        self.assertThat(get_interfaces, MockCalledOnceWith())

    @inlineCallbacks
    def test_recordInterfaces_called_when_interfaces_changed(self):
        get_interfaces = self.patch(services, "get_all_interfaces_definition")
        # Configuration changes between the first and second call.
        get_interfaces.side_effect = [sentinel.config1, sentinel.config2]

        service = self.makeService()
        self.assertThat(service.interfaces, HasLength(0))
        yield service.updateInterfaces()
        self.assertThat(service.interfaces, Equals([sentinel.config1]))
        yield service.updateInterfaces()
        self.assertThat(service.interfaces,
                        Equals([sentinel.config1, sentinel.config2]))

        self.assertThat(get_interfaces, MockCallsMatch(call(), call()))

    @inlineCallbacks
    def test_recordInterfaces_not_called_when_interfaces_not_changed(self):
        get_interfaces = self.patch(services, "get_all_interfaces_definition")
        # Configuration does NOT change between the first and second call.
        get_interfaces.side_effect = [{}, {}]

        service = self.makeService()
        self.assertThat(service.interfaces, HasLength(0))
        yield service.updateInterfaces()
        self.assertThat(service.interfaces, Equals([{}]))
        yield service.updateInterfaces()
        self.assertThat(service.interfaces, Equals([{}]))

        self.assertThat(get_interfaces, MockCallsMatch(call(), call()))

    @inlineCallbacks
    def test_recordInterfaces_called_after_failure(self):
        get_interfaces = self.patch(services, "get_all_interfaces_definition")
        get_interfaces.return_value = {}

        service = self.makeService()
        recordInterfaces = self.patch(service, "recordInterfaces")
        recordInterfaces.side_effect = [Exception, None, None]

        # Using the logger fixture prevents the test case from failing due
        # to the logged exception.
        with TwistedLoggerFixture():
            # recordInterfaces is called the first time, as expected.
            recordInterfaces.reset_mock()
            yield service.updateInterfaces()
            self.assertThat(recordInterfaces, MockCalledOnceWith({}, None))

            # recordInterfaces is called the second time too; the service noted
            # that it crashed last time and knew to run it again.
            recordInterfaces.reset_mock()
            yield service.updateInterfaces()
            self.assertThat(recordInterfaces, MockCalledOnceWith({}, None))

            # recordInterfaces is NOT called the third time; the service noted
            # that the configuration had not changed.
            recordInterfaces.reset_mock()
            yield service.updateInterfaces()
            self.assertThat(recordInterfaces, MockNotCalled())

    @inlineCallbacks
    def test_assumes_sole_responsibility_before_updating(self):
        # A filesystem lock is used to prevent multiple network monitors from
        # running on each host machine.
        service = self.makeService()

        # Not locked after instantiating the service.
        lock = service._lock
        self.assertFalse(lock.is_locked())

        # It's locked when the service is started and has begun iterating.
        service.startService()
        try:
            # It's locked once the first iteration is done.
            yield service.iterations.get()
            self.assertTrue(lock.is_locked())

            # It remains locked as the service iterates.
            yield service.updateInterfaces()
            self.assertTrue(lock.is_locked())

        finally:
            yield service.stopService()

        # It's unlocked now that the service is stopped.
        self.assertFalse(lock.is_locked())

        # Interfaces were recorded.
        self.assertThat(service.interfaces, Not(Equals([])))

    @inlineCallbacks
    def test_does_not_update_if_cannot_assume_sole_responsibility(self):
        # A filesystem lock is used to prevent multiple network monitors from
        # running on each host machine.
        lock = NetworksMonitoringLock()

        with lock:
            service = self.makeService()
            # Iterate a few times.
            yield service.updateInterfaces()
            yield service.updateInterfaces()
            yield service.updateInterfaces()

        # Interfaces were NOT recorded.
        self.assertThat(service.interfaces, Equals([]))

    @inlineCallbacks
    def test_attempts_to_assume_sole_responsibility_on_each_iteration(self):
        # A filesystem lock is used to prevent multiple network monitors from
        # running on each host machine.
        lock = NetworksMonitoringLock()

        with lock:
            service = self.makeService()
            # Iterate one time.
            yield service.updateInterfaces()

        # Interfaces have not been recorded yet.
        self.assertThat(service.interfaces, Equals([]))
        # Iterate once more and ...
        yield service.updateInterfaces()
        # ... interfaces ARE recorded.
        self.assertThat(service.interfaces, Not(Equals([])))
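The StubNetworksMonitoringService driven by the tests above is not shown in this example; a plausible sketch, inferred from how the tests use it (the real fixture lives in the MAAS test suite and may differ), simply records every configuration handed to recordInterfaces and exposes each completed update via a DeferredQueue. NetworksMonitoringService is the class under test and is assumed to be in scope.

from twisted.internet.defer import DeferredQueue


class StubNetworksMonitoringService(NetworksMonitoringService):
    """Records interface configurations instead of reporting them."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.iterations = DeferredQueue()
        self.interfaces = []
        self.update_interface__calls = 0

    def updateInterfaces(self):
        self.update_interface__calls += 1
        d = super().updateInterfaces()
        # Let tests wait for an iteration to finish via `iterations.get()`.
        d.addBoth(self.iterations.put)
        return d

    def recordInterfaces(self, interfaces, hints=None):
        self.interfaces.append(interfaces)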
Example #11
class TestNodePowerMonitorService(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def test_init_sets_up_timer_correctly(self):
        service = npms.NodePowerMonitorService()
        self.assertThat(
            service,
            MatchesStructure.byEquality(
                call=(service.try_query_nodes, tuple(), {}),
                step=15,
                clock=None,
            ),
        )

    def make_monitor_service(self):
        service = npms.NodePowerMonitorService(Clock())
        return service

    def test_query_nodes_calls_the_region(self):
        service = self.make_monitor_service()

        rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture())
        proto_region, io = rpc_fixture.makeEventLoop(
            region.ListNodePowerParameters
        )
        proto_region.ListNodePowerParameters.return_value = succeed(
            {"nodes": []}
        )

        client = getRegionClient()
        d = service.query_nodes(client)
        io.flush()

        self.assertEqual(None, extract_result(d))
        self.assertThat(
            proto_region.ListNodePowerParameters,
            MockCalledOnceWith(ANY, uuid=client.localIdent),
        )

    def test_query_nodes_calls_query_all_nodes(self):
        service = self.make_monitor_service()
        service.max_nodes_at_once = sentinel.max_nodes_at_once

        example_power_parameters = {
            "system_id": factory.make_UUID(),
            "hostname": factory.make_hostname(),
            "power_state": factory.make_name("power_state"),
            "power_type": factory.make_name("power_type"),
            "context": {},
        }

        rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture())
        proto_region, io = rpc_fixture.makeEventLoop(
            region.ListNodePowerParameters
        )
        proto_region.ListNodePowerParameters.side_effect = [
            succeed({"nodes": [example_power_parameters]}),
            succeed({"nodes": []}),
        ]

        query_all_nodes = self.patch(npms, "query_all_nodes")

        d = service.query_nodes(getRegionClient())
        io.flush()

        self.assertEqual(None, extract_result(d))
        self.assertThat(
            query_all_nodes,
            MockCalledOnceWith(
                [example_power_parameters],
                max_concurrency=sentinel.max_nodes_at_once,
                clock=service.clock,
            ),
        )

    def test_query_nodes_copes_with_NoSuchCluster(self):
        service = self.make_monitor_service()

        rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture())
        proto_region, io = rpc_fixture.makeEventLoop(
            region.ListNodePowerParameters
        )
        client = getRegionClient()
        proto_region.ListNodePowerParameters.return_value = fail(
            exceptions.NoSuchCluster.from_uuid(client.localIdent)
        )

        d = service.query_nodes(client)
        d.addErrback(service.query_nodes_failed, client.localIdent)
        with FakeLogger("maas") as maaslog:
            io.flush()

        self.assertEqual(None, extract_result(d))
        self.assertDocTestMatches(
            "Rack controller '...' is not recognised.", maaslog.output
        )

    def test_query_nodes_copes_with_losing_connection_to_region(self):
        service = self.make_monitor_service()

        client = Mock(
            return_value=fail(ConnectionDone("Connection was closed cleanly."))
        )

        with FakeLogger("maas") as maaslog:
            d = service.query_nodes(client)
            d.addErrback(service.query_nodes_failed, sentinel.ident)

        self.assertEqual(None, extract_result(d))
        self.assertDocTestMatches(
            "Lost connection to region controller.", maaslog.output
        )

    def test_try_query_nodes_logs_other_errors(self):
        service = self.make_monitor_service()
        self.patch(npms, "getRegionClient").return_value = sentinel.client
        sentinel.client.localIdent = factory.make_UUID()

        query_nodes = self.patch(service, "query_nodes")
        query_nodes.return_value = fail(
            ZeroDivisionError("Such a shame I can't divide by zero")
        )

        with FakeLogger("maas") as maaslog, TwistedLoggerFixture():
            d = service.try_query_nodes()

        self.assertEqual(None, extract_result(d))
        self.assertDocTestMatches(
            "Failed to query nodes' power status: "
            "Such a shame I can't divide by zero",
            maaslog.output,
        )
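test_init_sets_up_timer_correctly above relies on TimerService storing its interval in step and its callable as call=(f, args, kwargs), so the timer wiring of a 15-second monitor can be sketched in a few lines. This is only a sketch of that wiring, not the real NodePowerMonitorService, and max_nodes_at_once is an assumed default.

from twisted.application.internet import TimerService


class NodePowerMonitorServiceSketch(TimerService):
    """Query node power states every 15 seconds."""

    check_interval = 15  # seconds
    max_nodes_at_once = 5  # assumed default, caps query concurrency

    def __init__(self, clock=None):
        # TimerService stores self.step and self.call from these arguments,
        # which is exactly what the MatchesStructure assertion checks.
        super().__init__(self.check_interval, self.try_query_nodes)
        self.clock = clock

    def try_query_nodes(self):
        """Placeholder; the real method asks the region which nodes to
        query and hands them to query_all_nodes."""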
Example #12
class TestBeaconingSocketProtocol(SharedSecretTestCase):
    """Tests for `BeaconingSocketProtocol`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=15)

    @inlineCallbacks
    def test__creates_listen_port_when_run_with_IReactorMulticast(self):
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(reactor, port=0)
        self.assertThat(protocol.listen_port, Not(Is(None)))
        # This tests that the port gets closed properly; otherwise the test
        # suite will complain about things left in the reactor.
        yield protocol.stopProtocol()

    def test__skips_creating_listen_port_when_run_with_fake_reactor(self):
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(Clock(), port=0)
        self.assertThat(protocol.listen_port, Is(None))
        # No listen port, so stopProtocol() shouldn't return a Deferred.
        result = protocol.stopProtocol()
        self.assertThat(result, Is(None))

    @inlineCallbacks
    def test__sends_and_receives_unicast_beacons(self):
        # Note: Always use a random port for testing. (port=0)
        logger = self.useFixture(TwistedLoggerFixture())
        protocol = BeaconingSocketProtocol(
            reactor,
            port=0,
            process_incoming=True,
            loopback=True,
            interface="::",
            debug=True,
        )
        self.assertThat(protocol.listen_port, Not(Is(None)))
        listen_port = protocol.listen_port._realPortNumber
        self.write_secret()
        beacon = create_beacon_payload("solicitation", {})
        rx_uuid = beacon.payload["uuid"]
        destination = random.choice(["::ffff:127.0.0.1", "::1"])
        protocol.send_beacon(beacon, (destination, listen_port))
        # Pretend we didn't send this packet. Otherwise we won't reply to it.
        # We have to do this now, before the reactor runs again.
        transmitted = protocol.tx_queue.pop(rx_uuid, None)
        # Since we've instructed the protocol to loop back packets for testing,
        # it should have sent a multicast solicitation, received it back, sent
        # an advertisement, then received it back. So we'll wait until two
        # packets have been received.
        yield wait_for_rx_packets(protocol, 2)
        # Grab the beacon we know we transmitted and then received.
        received = protocol.rx_queue.pop(rx_uuid, None)
        self.assertThat(transmitted, Equals(beacon))
        self.assertThat(received[0].json["payload"]["uuid"], Equals(rx_uuid))
        # Grab the subsequent packets from the queues.
        transmitted = protocol.tx_queue.popitem()[1]
        received = protocol.rx_queue.popitem()[1]
        # We should have received a second packet to ack the first beacon.
        self.assertThat(received[0].json["payload"]["acks"], Equals(rx_uuid))
        # We should have transmitted an advertisement in response to the
        # solicitation.
        self.assertThat(transmitted.type, Equals("advertisement"))
        # This tests that the port gets closed properly; otherwise the test
        # suite will complain about things left in the reactor.
        yield protocol.stopProtocol()
        # In debug mode, the logger should have printed each packet.
        self.assertThat(
            logger.output,
            DocTestMatches("...Beacon received:...Own beacon received:..."),
        )

    @inlineCallbacks
    def test__send_multicast_beacon_sets_ipv4_source(self):
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(
            reactor,
            port=0,
            process_incoming=True,
            loopback=True,
            interface="::",
            debug=False,
        )
        self.assertThat(protocol.listen_port, Not(Is(None)))
        listen_port = protocol.listen_port._realPortNumber
        self.write_secret()
        beacon = create_beacon_payload("advertisement", {})
        protocol.send_multicast_beacon("127.0.0.1", beacon, port=listen_port)
        # Verify that we received the packet.
        yield wait_for_rx_packets(protocol, 1)
        yield protocol.stopProtocol()

    @inlineCallbacks
    def test__send_multicast_beacon_sets_ipv6_source(self):
        # Due to issues beyond my control, this test doesn't do what I expected
        # it to do. But it's still useful for code coverage (to make sure no
        # blatant exceptions occur in the IPv6 path).
        # self.skipTest(
        #    "IPv6 loopback multicast isn't working, for whatever reason.")
        # Since we can't test IPv6 multicast on the loopback interface, another
        # method can be used to verify that it's working:
        # (1) sudo tcpdump -i <physical-interface> 'udp and port == 5240'
        # (2) bin/maas-rack send-beacons -p 5240
        # Verifying IPv6 (and IPv4) multicast group join behavior can be
        # validated by doing something like:
        # (1) bin/maas-rack send-beacons -t 600
        #     (the high timeout will cause it to wait for 10 minutes)
        # (2) ip maddr show | egrep 'ff02::15a|224.0.0.118|$'
        # The expected result from command (2) will be that 'egrep' will
        # highlight the MAAS multicast groups in red text. Any Ethernet
        # interface with an assigned IPv4 address should have joined the
        # 224.0.0.118 group. All Ethernet interfaces should have joined the
        # 'ff02::15a' group.
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(
            reactor,
            port=0,
            process_incoming=True,
            loopback=True,
            interface="::",
            debug=False,
        )
        self.assertThat(protocol.listen_port, Not(Is(None)))
        listen_port = protocol.listen_port._realPortNumber
        self.write_secret()
        beacon = create_beacon_payload("advertisement", {})
        # The loopback interface ifindex should always be 1; this is saying
        # to send an IPv6 multicast on ifIndex == 1.
        protocol.send_multicast_beacon(1, beacon, port=listen_port)
        # Instead of skipping the test, just don't expect to receive anything.
        # yield wait_for_rx_packets(protocol, 1)
        yield protocol.stopProtocol()

    @inlineCallbacks
    def test__hints_for_own_beacon_received_on_another_interface(self):
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(
            reactor,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Need to generate a real UUID with the current time, so it doesn't
        # get aged out.
        uuid = str(uuid1())
        # Make the protocol think we sent a beacon with this UUID already.
        fake_tx_beacon = FakeBeaconPayload(uuid, ifname="eth0")
        protocol.tx_queue[uuid] = fake_tx_beacon
        fake_rx_beacon = {
            "source_ip": "127.0.0.1",
            "source_port": 5240,
            "destination_ip": "224.0.0.118",
            # Note the different receive interface.
            "interface": "eth1",
            "type": "solicitation",
            "payload": fake_tx_beacon.payload,
        }
        protocol.beaconReceived(fake_rx_beacon)
        # Should only have created one hint.
        hint = protocol.topology_hints[uuid].pop()
        self.assertThat(hint.hint, Equals("rx_own_beacon_on_other_interface"))
        yield protocol.stopProtocol()

    @inlineCallbacks
    def test__hints_for_own_beacon_received_on_same_interface(self):
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(
            reactor,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Need to generate a real UUID with the current time, so it doesn't
        # get aged out.
        uuid = str(uuid1())
        # Make the protocol think we sent a beacon with this UUID already.
        fake_tx_beacon = FakeBeaconPayload(uuid, ifname="eth0")
        protocol.tx_queue[uuid] = fake_tx_beacon
        fake_rx_beacon = {
            "source_ip": "127.0.0.1",
            "source_port": 5240,
            "destination_ip": "224.0.0.118",
            "interface": "eth0",
            "type": "solicitation",
            "payload": fake_tx_beacon.payload,
        }
        protocol.beaconReceived(fake_rx_beacon)
        # Should only have created one hint.
        hint = protocol.topology_hints[uuid].pop()
        self.assertThat(hint.hint, Equals("rx_own_beacon_on_tx_interface"))
        yield protocol.stopProtocol()

    @inlineCallbacks
    def test__hints_for_same_beacon_seen_on_multiple_interfaces(self):
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(
            reactor,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Don't try to send out any replies.
        self.patch(services, "create_beacon_payload")
        self.patch(protocol, "send_beacon")
        # Need to generate a real UUID with the current time, so it doesn't
        # get aged out.
        uuid = str(uuid1())
        # Make the protocol think we sent a beacon with this UUID already.
        fake_tx_beacon = FakeBeaconPayload(uuid, ifname="eth0")
        fake_rx_beacon_eth0 = {
            "source_ip": "127.0.0.1",
            "source_port": 5240,
            "destination_ip": "224.0.0.118",
            "interface": "eth0",
            "type": "solicitation",
            "payload": fake_tx_beacon.payload,
        }
        fake_rx_beacon_eth1 = {
            "source_ip": "127.0.0.1",
            "source_port": 5240,
            "destination_ip": "224.0.0.118",
            "interface": "eth1",
            "vid": 100,
            "type": "solicitation",
            "payload": fake_tx_beacon.payload,
        }
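        # Receiving the same beacon UUID on two different interfaces should
        # produce reciprocal "same_local_fabric_as" hints for eth0 and eth1.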
        protocol.beaconReceived(fake_rx_beacon_eth0)
        protocol.beaconReceived(fake_rx_beacon_eth1)
        hints = protocol.topology_hints[uuid]
        expected_hints = {
            TopologyHint(
                ifname="eth0",
                vid=None,
                hint="same_local_fabric_as",
                related_ifname="eth1",
                related_vid=100,
                related_mac=None,
            ),
            TopologyHint(
                ifname="eth1",
                vid=100,
                hint="same_local_fabric_as",
                related_ifname="eth0",
                related_vid=None,
                related_mac=None,
            ),
        }
        self.assertThat(hints, Equals(expected_hints))
        yield protocol.stopProtocol()

    @inlineCallbacks
    def test__hints_for_remote_unicast(self):
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(
            reactor,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Don't try to send out any replies.
        self.patch(services, "create_beacon_payload")
        self.patch(protocol, "send_beacon")
        # Need to generate a real UUID with the current time, so it doesn't
        # get aged out.
        uuid = str(uuid1())
        # Make the protocol think we sent a beacon with this UUID already.
        tx_mac = factory.make_mac_address()
        fake_tx_beacon = FakeBeaconPayload(uuid,
                                           ifname="eth1",
                                           mac=tx_mac,
                                           vid=100)
        fake_rx_beacon = {
            "source_ip": "127.0.0.1",
            "source_port": 5240,
            "destination_ip": "127.0.0.1",
            "interface": "eth0",
            "type": "solicitation",
            "payload": fake_tx_beacon.payload,
        }
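        # The destination is a unicast address rather than the multicast
        # group, so the sender is expected to be routable from eth0
        # (hint: "routable_to").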
        protocol.beaconReceived(fake_rx_beacon)
        hints = protocol.topology_hints[uuid]
        expected_hints = {
            TopologyHint(
                ifname="eth0",
                vid=None,
                hint="routable_to",
                related_ifname="eth1",
                related_vid=100,
                related_mac=tx_mac,
            )
        }
        self.assertThat(hints, Equals(expected_hints))
        yield protocol.stopProtocol()

    @inlineCallbacks
    def test__hints_for_remote_multicast(self):
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(
            reactor,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Don't try to send out any replies.
        self.patch(services, "create_beacon_payload")
        self.patch(protocol, "send_beacon")
        # Need to generate a real UUID with the current time, so it doesn't
        # get aged out.
        uuid = str(uuid1())
        # Make the protocol think we sent a beacon with this UUID already.
        tx_mac = factory.make_mac_address()
        fake_tx_beacon = FakeBeaconPayload(uuid,
                                           ifname="eth1",
                                           mac=tx_mac,
                                           vid=100)
        fake_rx_beacon = {
            "source_ip": "127.0.0.1",
            "source_port": 5240,
            "destination_ip": "224.0.0.118",
            "interface": "eth0",
            "vid": 200,
            "type": "solicitation",
            "payload": fake_tx_beacon.payload,
        }
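        # The beacon arrived via the multicast group from another host's
        # interface, so eth0 is expected to be on a remote network relative
        # to the sender (hint: "on_remote_network").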
        protocol.beaconReceived(fake_rx_beacon)
        hints = protocol.topology_hints[uuid]
        expected_hints = {
            TopologyHint(
                ifname="eth0",
                vid=200,
                hint="on_remote_network",
                related_ifname="eth1",
                related_vid=100,
                related_mac=tx_mac,
            )
        }
        self.assertThat(hints, Equals(expected_hints))
        yield protocol.stopProtocol()

    @inlineCallbacks
    def test__getJSONTopologyHints_converts_hints_to_dictionary(self):
        # Note: Always use a random port for testing. (port=0)
        protocol = BeaconingSocketProtocol(
            reactor,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Don't try to send out any replies.
        self.patch(services, "create_beacon_payload")
        self.patch(protocol, "send_beacon")
        # Need to generate a real UUID with the current time, so it doesn't
        # get aged out.
        uuid = str(uuid1())
        # Make the protocol think we sent a beacon with this UUID already.
        tx_mac = factory.make_mac_address()
        fake_tx_beacon = FakeBeaconPayload(uuid,
                                           ifname="eth1",
                                           mac=tx_mac,
                                           vid=100)
        fake_rx_beacon = {
            "source_ip": "127.0.0.1",
            "source_port": 5240,
            "destination_ip": "224.0.0.118",
            "interface": "eth0",
            "type": "solicitation",
            "payload": fake_tx_beacon.payload,
        }
        protocol.beaconReceived(fake_rx_beacon)
        all_hints = protocol.getJSONTopologyHints()
        expected_hints = [
            # Note: since vid=None on the received beacon, we expect that
            # the hint won't have a 'vid' field.
            dict(
                ifname="eth0",
                hint="on_remote_network",
                related_ifname="eth1",
                related_vid=100,
                related_mac=tx_mac,
            )
        ]
        self.assertThat(all_hints, Equals(expected_hints))
        yield protocol.stopProtocol()

    @inlineCallbacks
    def test__queues_multicast_beacon_solicitations_upon_request(self):
        # Note: Always use a random port for testing. (port=0)
        clock = Clock()
        protocol = BeaconingSocketProtocol(
            clock,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Don't try to send out any replies.
        self.patch(services, "create_beacon_payload")
        send_mcast_mock = self.patch(protocol, "send_multicast_beacons")
        self.patch(protocol, "send_beacon")
        yield protocol.queueMulticastBeaconing(solicitation=True)
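        # The send is driven by the reactor clock; advancing the fake clock
        # by zero seconds lets the queued beaconing call run.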
        clock.advance(0)
        self.assertThat(send_mcast_mock, MockCalledOnceWith({},
                                                            "solicitation"))

    @inlineCallbacks
    def test__multicasts_at_most_once_per_five_seconds(self):
        # Note: Always use a random port for testing. (port=0)
        clock = Clock()
        protocol = BeaconingSocketProtocol(
            clock,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Don't try to send out any replies.
        self.patch(services, "create_beacon_payload")
        monotonic_mock = self.patch(services.time, "monotonic")
        send_mcast_mock = self.patch(protocol, "send_multicast_beacons")
        self.patch(protocol, "send_beacon")
        monotonic_mock.side_effect = [
            # Initial queue
            6,
            # Initial dequeue
            6,
            # Second queue (hasn't yet been 5 seconds)
            10,
            # Third queue
            11,
            # Second dequeue
            11,
        ]
        yield protocol.queueMulticastBeaconing()
        clock.advance(0)
        self.assertThat(send_mcast_mock, MockCalledOnceWith({},
                                                            "advertisement"))
        send_mcast_mock.reset_mock()
        yield protocol.queueMulticastBeaconing()
        yield protocol.queueMulticastBeaconing(solicitation=True)
        clock.advance(4.9)
        self.assertThat(send_mcast_mock, MockNotCalled())
        clock.advance(0.1)
        self.assertThat(send_mcast_mock, MockCalledOnceWith({},
                                                            "solicitation"))

    @inlineCallbacks
    def test__multiple_beacon_requests_coalesced(self):
        # Note: Always use a random port for testing. (port=0)
        clock = Clock()
        protocol = BeaconingSocketProtocol(
            clock,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Don't try to send out any replies.
        self.patch(services, "create_beacon_payload")
        send_mcast_mock = self.patch(protocol, "send_multicast_beacons")
        self.patch(protocol, "send_beacon")
        yield protocol.queueMulticastBeaconing()
        yield protocol.queueMulticastBeaconing()
        clock.advance(5)
        self.assertThat(send_mcast_mock, MockCalledOnceWith({},
                                                            "advertisement"))

    @inlineCallbacks
    def test__solicitation_wins_when_multiple_requests_queued(self):
        # Note: Always use a random port for testing. (port=0)
        clock = Clock()
        protocol = BeaconingSocketProtocol(
            clock,
            port=0,
            process_incoming=False,
            loopback=True,
            interface="::",
            debug=True,
        )
        # Don't try to send out any replies.
        self.patch(services, "create_beacon_payload")
        send_mcast_mock = self.patch(protocol, "send_multicast_beacons")
        self.patch(protocol, "send_beacon")
        yield protocol.queueMulticastBeaconing()
        yield protocol.queueMulticastBeaconing(solicitation=True)
        clock.advance(5)
        self.assertThat(send_mcast_mock, MockCalledOnceWith({},
                                                            "solicitation"))
Example #13
0
class TestProcessProtocolService(MAASTestCase):
    """Tests for `JSONPerLineProtocol`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=15)

    def setUp(self):
        super(TestProcessProtocolService, self).setUp()
        # Alter timings of terminateProcess so we don't have to wait so long.
        self.patch(
            services,
            "terminateProcess",
            partial(services.terminateProcess, quit_after=0.2, kill_after=0.4),
        )

    def test__base_class_cannot_be_used(self):
        with ExpectedException(TypeError):
            ProcessProtocolService()

    @inlineCallbacks
    def test__starts_and_stops_process(self):
        service = SleepProcessProtocolService()
        with TwistedLoggerFixture() as logger:
            service.startService()
            self.assertThat(service._protocol.done, Not(IsFiredDeferred()))
            yield service.stopService()
            result = yield service._protocol.done
            self.assertThat(result, Is(None))
        self.assertThat(
            logger.output,
            DocTestMatches("SleepProcessProtocolService started.\n"
                           "-...-\n"
                           "SleepProcessProtocolService ..."),
        )
        with ExpectedException(ProcessExitedAlready):
            service._process.signalProcess("INT")

    @inlineCallbacks
    def test__handles_normal_process_exit(self):
        # If the spawned process exits with an exit code of zero, this is
        # logged as "ended normally".
        service = TrueProcessProtocolService()
        with TwistedLoggerFixture() as logger:
            service.startService()
            yield service._protocol.done
            yield service.stopService()
        self.assertThat(
            logger.output,
            Equals("TrueProcessProtocolService started.\n"
                   "---\n"
                   "TrueProcessProtocolService ended normally."),
        )

    @inlineCallbacks
    def test__handles_terminated_process_exit(self):
        # During service stop, the spawned process can be terminated with a
        # signal. This is logged with a slightly different message.
        service = SleepProcessProtocolService()
        with TwistedLoggerFixture() as logger:
            service.startService()
            yield service.stopService()
        self.assertThat(
            logger.output,
            Equals("SleepProcessProtocolService started.\n"
                   "---\n"
                   "SleepProcessProtocolService was terminated."),
        )

    @inlineCallbacks
    def test__handles_abnormal_process_exit(self):
        # If the spawned process exits with a non-zero exit code, this is
        # logged as "a probable error".
        service = FalseProcessProtocolService()
        with TwistedLoggerFixture() as logger:
            service.startService()
            result = yield service._protocol.done
            self.assertThat(result, Is(None))
            yield service.stopService()
        self.assertThat(
            logger.output,
            DocTestMatches("FalseProcessProtocolService started.\n"
                           "---\n"
                           "FalseProcessProtocolService failed.\n"
                           "Traceback (most recent call last):\n"
                           "...: A process has ended with a probable error "
                           "condition: process ended with exit code 1."),
        )

    @inlineCallbacks
    def test__calls_protocol_callback(self):
        service = EchoProcessProtocolService()
        service.startService()
        # Wait for the protocol to finish (the echo process exits on its own).
        result = yield service._protocol.done
        self.assertThat(service._callback, MockCalledOnceWith([{}]))
        yield service.stopService()
        self.assertThat(result, Is(None))
Example #14
0
class TestJSONPerLineProtocol(MAASTestCase):
    """Tests for `JSONPerLineProtocol`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    @inlineCallbacks
    def test__propagates_exit_errors(self):
        proto = JSONPerLineProtocol(callback=lambda json: None)
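        # Spawn `false`, which exits with a non-zero status; the resulting
        # ProcessTerminated failure should propagate through `proto.done`.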
        reactor.spawnProcess(proto, b"false", (b"false", ))
        with ExpectedException(ProcessTerminated, ".* exit code 1"):
            yield proto.done

    def test__parses_only_full_lines(self):
        callback = Mock()
        proto = JSONPerLineProtocol(callback=callback)
        proto.connectionMade()
        # Send an empty JSON dictionary using 3 separate writes.
        proto.outReceived(b"{")
        # No callback yet...
        self.expectThat(callback, MockCallsMatch())
        proto.outReceived(b"}")
        # Still no callback...
        self.expectThat(callback, MockCallsMatch())
        proto.outReceived(b"\n")
        # After a newline, we expect the JSON to be parsed and the callback
        # to receive an empty Python dictionary (corresponding to the JSON
        # that was sent).
        self.expectThat(callback, MockCallsMatch(call([{}])))

    def test__ignores_interspersed_zero_length_writes(self):
        callback = Mock()
        proto = JSONPerLineProtocol(callback=callback)
        proto.connectionMade()
        proto.outReceived(b"")
        self.expectThat(callback, MockCallsMatch())
        proto.outReceived(b"{}\n")
        self.expectThat(callback, MockCallsMatch(call([{}])))
        proto.outReceived(b"")
        self.expectThat(callback, MockCallsMatch(call([{}])))
        proto.outReceived(b"{}\n")
        self.expectThat(callback, MockCallsMatch(call([{}]), call([{}])))

    def test__logs_non_json_output(self):
        callback = Mock()
        proto = JSONPerLineProtocol(callback=callback)
        proto.connectionMade()
        with TwistedLoggerFixture() as logger:
            proto.outReceived(b"{\n")
        self.assertThat(logger.output,
                        DocTestMatches("Failed to parse JSON: ..."))

    def test__logs_stderr(self):
        message = factory.make_name("message")
        callback = Mock()
        proto = JSONPerLineProtocol(callback=callback)
        proto.connectionMade()
        with TwistedLoggerFixture() as logger:
            proto.errReceived((message + "\n").encode("ascii"))
        self.assertThat(logger.output, Equals(message))

    def test__logs_only_full_lines_from_stderr(self):
        message = factory.make_name("message")
        callback = Mock()
        proto = JSONPerLineProtocol(callback=callback)
        proto.connectionMade()
        with TwistedLoggerFixture() as logger:
            proto.errReceived(message.encode("ascii"))
        self.assertThat(logger.output, Equals(""))

    def test__logs_stderr_at_process_end(self):
        message = factory.make_name("message")
        callback = Mock()
        proto = JSONPerLineProtocol(callback=callback)
        proto.connectionMade()
        with TwistedLoggerFixture() as logger:
            proto.errReceived(message.encode("ascii"))
            self.assertThat(logger.output, Equals(""))
            proto.processEnded(Failure(ProcessDone(0)))
        self.assertThat(logger.output, Equals(message))

    @inlineCallbacks
    def test__propagates_errors_from_command(self):
        proto = JSONPerLineProtocol(callback=lambda obj: None)
        proto.connectionMade()
        reason = Failure(ProcessTerminated(1))
        proto.processEnded(reason)
        with ExpectedException(ProcessTerminated):
            yield proto.done
Example #15
0
class TestRackProxy(MAASTestCase):
    """Tests for `RackProxy` for `RackExternalService`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5000)

    def setUp(self):
        super().setUp()
        self.patch(
            clusterservice, "get_all_interfaces_definition"
        ).return_value = {}

    def make_cidrs(self):
        return frozenset(
            {
                str(factory.make_ipv4_network()),
                str(factory.make_ipv6_network()),
            }
        )

    def extract_regions(self, rpc_service):
        return frozenset(
            {
                client.address[0]
                for _, client in rpc_service.connections.items()
            }
        )

    def make_RackProxy_ExternalService(self, rpc_service, reactor):
        proxy = external.RackProxy()
        service = make_startable_RackExternalService(
            self, rpc_service, reactor, [("proxy", proxy)]
        )
        return service, proxy

    @inlineCallbacks
    def test_getConfiguration_returns_configuration_object(self):
        is_region, is_rack = factory.pick_bool(), factory.pick_bool()
        allowed_cidrs = self.make_cidrs()
        proxy_enabled = factory.pick_bool()
        proxy_prefer_v4_proxy = factory.pick_bool()
        proxy_port = random.randint(1000, 8000)
        rpc_service, protocol = yield prepareRegion(
            self,
            is_region=is_region,
            is_rack=is_rack,
            proxy_enabled=proxy_enabled,
            proxy_allowed_cidrs=allowed_cidrs,
            proxy_port=proxy_port,
            proxy_prefer_v4_proxy=proxy_prefer_v4_proxy,
        )
        region_ips = self.extract_regions(rpc_service)
        service, proxy = self.make_RackProxy_ExternalService(
            rpc_service, reactor
        )
        yield service.startService()
        self.addCleanup((yield service.stopService))

        config = yield service._getConfiguration()
        observed = proxy._getConfiguration(
            config.controller_type,
            config.proxy_configuration,
            config.connections,
        )

        self.assertThat(observed, IsInstance(external._ProxyConfiguration))
        self.assertThat(
            observed,
            MatchesStructure.byEquality(
                enabled=proxy_enabled,
                port=proxy_port,
                allowed_cidrs=allowed_cidrs,
                prefer_v4_proxy=proxy_prefer_v4_proxy,
                upstream_proxies=region_ips,
                is_region=is_region,
                is_rack=is_rack,
            ),
        )

    @inlineCallbacks
    def test_tryUpdate_updates_proxy_server(self):
        self.useFixture(MAASRootFixture())
        allowed_cidrs = self.make_cidrs()
        proxy_prefer_v4_proxy = factory.pick_bool()
        proxy_port = random.randint(1000, 8000)
        rpc_service, _ = yield prepareRegion(
            self,
            proxy_allowed_cidrs=allowed_cidrs,
            proxy_port=proxy_port,
            proxy_prefer_v4_proxy=proxy_prefer_v4_proxy,
        )
        region_ips = self.extract_regions(rpc_service)
        service, _ = self.make_RackProxy_ExternalService(rpc_service, reactor)

        write_config = self.patch_autospec(
            external.proxy_config, "write_config"
        )
        service_monitor = self.patch_autospec(external, "service_monitor")

        yield service.startService()
        self.addCleanup((yield service.stopService))

        yield service._orig_tryUpdate()

        expected_peers = sorted(
            ["http://%s:%s" % (ip, proxy_port) for ip in region_ips]
        )
        self.assertThat(
            write_config,
            MockCalledOnceWith(
                allowed_cidrs,
                peer_proxies=expected_peers,
                prefer_v4_proxy=proxy_prefer_v4_proxy,
                maas_proxy_port=proxy_port,
            ),
        )
        self.assertThat(
            service_monitor.reloadService, MockCalledOnceWith("proxy_rack")
        )
        # If the configuration has not changed then a second call to
        # `_tryUpdate` does not result in another call to `_configure`.
        yield service._orig_tryUpdate()
        self.assertThat(
            write_config,
            MockCalledOnceWith(
                allowed_cidrs,
                peer_proxies=expected_peers,
                prefer_v4_proxy=proxy_prefer_v4_proxy,
                maas_proxy_port=proxy_port,
            ),
        )
        self.assertThat(
            service_monitor.reloadService, MockCalledOnceWith("proxy_rack")
        )

    @inlineCallbacks
    def test_sets_proxy_rack_service_to_any_when_is_region(self):
        # Patch the logger in the clusterservice so no log messages are printed
        # because the tests run in debug mode.
        self.patch(common.log, "debug")
        self.useFixture(MAASRootFixture())
        rpc_service, _ = yield prepareRegion(self, is_region=True)
        service, proxy = self.make_RackProxy_ExternalService(
            rpc_service, reactor
        )
        self.patch_autospec(proxy, "_configure")  # No-op configuration.

        # There is no most recently applied configuration.
        self.assertThat(proxy._configuration, Is(None))

        with TwistedLoggerFixture() as logger:
            yield service.startService()
            self.addCleanup((yield service.stopService))
            yield service._orig_tryUpdate()

        # Ensure that the service was set to any.
        service = service_monitor.getServiceByName("proxy_rack")
        self.assertEqual(
            (SERVICE_STATE.ANY, "managed by the region"),
            service.getExpectedState(),
        )
        # The most recently applied configuration is set, though it was not
        # actually "applied" because this host was configured as a region+rack
        # controller, and the rack should not attempt to manage the proxy
        # server on a region+rack.
        self.assertThat(
            proxy._configuration, IsInstance(external._ProxyConfiguration)
        )
        # The configuration was not applied.
        self.assertThat(proxy._configure, MockNotCalled())
        # Nothing was logged; there's no need for lots of chatter.
        self.assertThat(logger.output, Equals(""))
Example #16
0
class TestSendPodCommissioningResults(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    @inlineCallbacks
    def test_unknown_pod_raises_UnknownPodType(self):
        with ExpectedException(exceptions.UnknownPodType):
            yield pods.send_pod_commissioning_results(
                pod_type=factory.make_name("type"),
                context={},
                pod_id=random.randint(1, 10),
                name=factory.make_name("name"),
                system_id=factory.make_name("system_id"),
                consumer_key=factory.make_name("consumer_key"),
                token_key=factory.make_name("token_key"),
                token_secret=factory.make_name("token_secret"),
                metadata_url=urlparse(factory.make_url()),
            )

    @inlineCallbacks
    def test_handles_driver_not_returning_Deferred(self):
        pod_type = factory.make_name("type")
        fake_driver = MagicMock()
        fake_driver.get_commissioning_data.return_value = None
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                f"bad pod driver '{pod_type}'; 'get_commissioning_data' did not "
                "return Deferred."
            ),
        ):
            yield pods.send_pod_commissioning_results(
                pod_type=pod_type,
                context={},
                pod_id=random.randint(1, 10),
                name=factory.make_name("name"),
                system_id=factory.make_name("system_id"),
                consumer_key=factory.make_name("consumer_key"),
                token_key=factory.make_name("token_key"),
                token_secret=factory.make_name("token_secret"),
                metadata_url=urlparse(factory.make_url()),
            )

    @inlineCallbacks
    def test_sends_results(self):
        mock_signal = self.patch_autospec(pods, "signal")
        consumer_key = factory.make_name("consumer_key")
        token_key = factory.make_name("token_key")
        token_secret = factory.make_name("token_secret")
        metadata_url = factory.make_url()
        filename1 = factory.make_name("filename1")
        data1 = {factory.make_name("key1"): factory.make_name("value1")}
        filename2 = factory.make_name("filename2")
        data2 = {factory.make_name("key2"): factory.make_name("value2")}
        fake_driver = MagicMock()
        fake_driver.get_commissioning_data.return_value = succeed(
            {filename1: data1, filename2: data2}
        )
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
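        # Each file returned by the driver should be sent to the metadata
        # service as its own "WORKING" signal (verified below).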
        ret = yield pods.send_pod_commissioning_results(
            pod_type=factory.make_name("type"),
            context={},
            pod_id=random.randint(1, 10),
            name=factory.make_name("name"),
            system_id=factory.make_name("system_id"),
            consumer_key=consumer_key,
            token_key=token_key,
            token_secret=token_secret,
            metadata_url=urlparse(metadata_url),
        )
        self.assertDictEqual({}, ret)
        self.assertThat(
            mock_signal,
            MockCallsMatch(
                call(
                    url=metadata_url,
                    credentials=Credentials(
                        consumer_key=consumer_key,
                        token_key=token_key,
                        token_secret=token_secret,
                    ),
                    status="WORKING",
                    files={
                        filename1: json.dumps(data1, indent=4).encode(),
                        f"{filename1}.out": json.dumps(
                            data1, indent=4
                        ).encode(),
                        f"{filename1}.err": b"",
                        f"{filename1}.yaml": b"",
                    },
                    exit_status=0,
                    error=f"Finished {filename1}: 0",
                ),
                call(
                    url=metadata_url,
                    credentials=Credentials(
                        consumer_key=consumer_key,
                        token_key=token_key,
                        token_secret=token_secret,
                    ),
                    status="WORKING",
                    files={
                        filename2: json.dumps(data2, indent=4).encode(),
                        f"{filename2}.out": json.dumps(
                            data2, indent=4
                        ).encode(),
                        f"{filename2}.err": b"",
                        f"{filename2}.yaml": b"",
                    },
                    exit_status=0,
                    error=f"Finished {filename2}: 0",
                ),
            ),
        )

    @inlineCallbacks
    def test_sends_results_raises_podactionfail_on_signalexception(self):
        mock_signal = self.patch_autospec(pods, "signal")
        err_msg = factory.make_name("error_message")
        mock_signal.side_effect = SignalException(err_msg)
        name = factory.make_name("name")
        system_id = factory.make_name("system_id")
        fake_driver = MagicMock()
        fake_driver.get_commissioning_data.return_value = succeed(
            {factory.make_name("filename"): factory.make_name("data")}
        )
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                f"Unable to send Pod commissioning information for {name}({system_id}): {err_msg}"
            ),
        ):
            yield pods.send_pod_commissioning_results(
                pod_type=factory.make_name("type"),
                context={},
                pod_id=random.randint(1, 10),
                name=name,
                system_id=system_id,
                consumer_key=factory.make_name("consumer_key"),
                token_key=factory.make_name("token_key"),
                token_secret=factory.make_name("token_secret"),
                metadata_url=urlparse(factory.make_url()),
            )

    @inlineCallbacks
    def test_handles_driver_raising_NotImplementedError(self):
        fake_driver = MagicMock()
        fake_driver.get_commissioning_data.return_value = fail(
            NotImplementedError()
        )
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(NotImplementedError):
            yield pods.send_pod_commissioning_results(
                pod_type=factory.make_name("type"),
                context={},
                pod_id=random.randint(1, 10),
                name=factory.make_name("name"),
                system_id=factory.make_name("system_id"),
                consumer_key=factory.make_name("consumer_key"),
                token_key=factory.make_name("token_key"),
                token_secret=factory.make_name("token_secret"),
                metadata_url=urlparse(factory.make_url()),
            )

    @inlineCallbacks
    def test_handles_driver_raising_any_Exception(self):
        fake_driver = MagicMock()
        fake_exception_type = factory.make_exception_type()
        fake_exception_msg = factory.make_name("error")
        fake_exception = fake_exception_type(fake_exception_msg)
        fake_driver.get_commissioning_data.return_value = fail(fake_exception)
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape("Failed talking to pod: " + fake_exception_msg),
        ):
            yield pods.send_pod_commissioning_results(
                pod_type=factory.make_name("type"),
                context={},
                pod_id=random.randint(1, 10),
                name=factory.make_name("name"),
                system_id=factory.make_name("system_id"),
                consumer_key=factory.make_name("consumer_key"),
                token_key=factory.make_name("token_key"),
                token_secret=factory.make_name("token_secret"),
                metadata_url=urlparse(factory.make_url()),
            )
Example #17
0
class TestRackSyslog(MAASTestCase):
    """Tests for `RackSyslog` for `RackExternalService`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5000)

    def setUp(self):
        super().setUp()
        self.patch(
            clusterservice, "get_all_interfaces_definition"
        ).return_value = {}

    def extract_regions(self, rpc_service):
        return frozenset(
            {
                (eventloop, client.address[0])
                for eventloop, client in rpc_service.connections.items()
            }
        )

    def make_RackSyslog_ExternalService(self, rpc_service, reactor):
        syslog = external.RackSyslog()
        service = make_startable_RackExternalService(
            self, rpc_service, reactor, [("syslog", syslog)]
        )
        return service, syslog

    @inlineCallbacks
    def test_getConfiguration_returns_configuration_object(self):
        port = factory.pick_port()
        is_region, is_rack = factory.pick_bool(), factory.pick_bool()
        rpc_service, protocol = yield prepareRegion(
            self, is_region=is_region, is_rack=is_rack, syslog_port=port
        )
        forwarders = self.extract_regions(rpc_service)
        service, syslog = self.make_RackSyslog_ExternalService(
            rpc_service, reactor
        )
        yield service.startService()
        self.addCleanup((yield service.stopService))

        config = yield service._getConfiguration()
        observed = syslog._getConfiguration(
            config.controller_type,
            config.syslog_configuration,
            config.connections,
        )

        self.assertThat(observed, IsInstance(external._SyslogConfiguration))
        self.assertThat(
            observed,
            MatchesStructure.byEquality(
                port=port,
                forwarders=forwarders,
                is_region=is_region,
                is_rack=is_rack,
            ),
        )

    @inlineCallbacks
    def test_tryUpdate_updates_syslog_server(self):
        self.useFixture(MAASRootFixture())
        port = factory.pick_port()
        rpc_service, _ = yield prepareRegion(self, syslog_port=port)
        forwarders = self.extract_regions(rpc_service)
        service, _ = self.make_RackSyslog_ExternalService(rpc_service, reactor)

        write_config = self.patch_autospec(
            external.syslog_config, "write_config"
        )
        service_monitor = self.patch_autospec(external, "service_monitor")

        yield service.startService()
        self.addCleanup((yield service.stopService))

        yield service._orig_tryUpdate()

        expected_forwards = [
            {"name": name, "ip": ip} for name, ip in forwarders
        ]
        self.assertThat(
            write_config,
            MockCalledOnceWith(False, forwarders=expected_forwards, port=port),
        )
        self.assertThat(
            service_monitor.restartService, MockCalledOnceWith("syslog_rack")
        )
        # If the configuration has not changed then a second call to
        # `_tryUpdate` does not result in another call to `_configure`.
        yield service._orig_tryUpdate()
        self.assertThat(
            write_config,
            MockCalledOnceWith(False, forwarders=expected_forwards, port=port),
        )
        self.assertThat(
            service_monitor.restartService, MockCalledOnceWith("syslog_rack")
        )

    @inlineCallbacks
    def test_sets_syslog_rack_service_to_any_when_is_region(self):
        # Patch the logger in the clusterservice so no log messages are printed
        # because the tests run in debug mode.
        self.patch(common.log, "debug")
        self.useFixture(MAASRootFixture())
        rpc_service, _ = yield prepareRegion(self, is_region=True)
        service, syslog = self.make_RackSyslog_ExternalService(
            rpc_service, reactor
        )
        self.patch_autospec(syslog, "_configure")  # No-op configuration.

        # There is no most recently applied configuration.
        self.assertThat(syslog._configuration, Is(None))

        with TwistedLoggerFixture() as logger:
            yield service.startService()
            self.addCleanup((yield service.stopService))
            yield service._orig_tryUpdate()

        # Ensure that the service was set to any.
        service = service_monitor.getServiceByName("syslog_rack")
        self.assertEqual(
            (SERVICE_STATE.ANY, "managed by the region"),
            service.getExpectedState(),
        )
        # The most recently applied configuration is set, though it was not
        # actually "applied" because this host was configured as a region+rack
        # controller, and the rack should not attempt to manage the syslog
        # server on a region+rack.
        self.assertThat(
            syslog._configuration, IsInstance(external._SyslogConfiguration)
        )
        # The configuration was not applied.
        self.assertThat(syslog._configure, MockNotCalled())
        # Nothing was logged; there's no need for lots of chatter.
        self.assertThat(logger.output, Equals(""))
Example #18
0
class TestDecomposeMachine(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    @inlineCallbacks
    def test_unknown_pod_raises_UnknownPodType(self):
        unknown_type = factory.make_name("unknown")
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        with ExpectedException(exceptions.UnknownPodType):
            yield pods.decompose_machine(
                unknown_type, {}, pod_id=pod_id, name=pod_name
            )

    @inlineCallbacks
    def test_handles_driver_not_returning_Deferred(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.decompose.return_value = None
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                "bad pod driver '%s'; 'decompose' did not "
                "return Deferred." % fake_driver.name
            ),
        ):
            yield pods.decompose_machine(
                fake_driver.name, {}, pod_id=pod_id, name=pod_name
            )

    @inlineCallbacks
    def test_handles_driver_returning_None(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.decompose.return_value = succeed(None)
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                "bad pod driver '%s'; 'decompose' "
                "returned invalid result." % fake_driver.name
            ),
        ):
            yield pods.decompose_machine(
                fake_driver.name, {}, pod_id=pod_id, name=pod_name
            )

    @inlineCallbacks
    def test_handles_driver_not_returning_hints(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.decompose.return_value = succeed(object())
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                "bad pod driver '%s'; 'decompose' "
                "returned invalid result." % fake_driver.name
            ),
        ):
            yield pods.decompose_machine(
                fake_driver.name, {}, pod_id=pod_id, name=pod_name
            )

    @inlineCallbacks
    def test_works_when_driver_returns_hints(self):
        hints = DiscoveredPodHints(
            cores=random.randint(1, 8),
            cpu_speed=random.randint(1000, 2000),
            memory=random.randint(1024, 8192),
            local_storage=0,
        )
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.decompose.return_value = succeed(hints)
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        result = yield pods.decompose_machine(
            fake_driver.name, {}, pod_id=pod_id, name=pod_name
        )
        self.assertEqual({"hints": hints}, result)

    @inlineCallbacks
    def test_handles_driver_raising_NotImplementedError(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.decompose.return_value = fail(NotImplementedError())
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(NotImplementedError):
            yield pods.decompose_machine(
                fake_driver.name, {}, pod_id=pod_id, name=pod_name
            )

    @inlineCallbacks
    def test_handles_driver_raising_any_Exception(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_exception_type = factory.make_exception_type()
        fake_exception_msg = factory.make_name("error")
        fake_exception = fake_exception_type(fake_exception_msg)
        fake_driver.decompose.return_value = fail(fake_exception)
        pod_id = random.randint(1, 10)
        pod_name = factory.make_name("pod")
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape("Failed talking to pod: " + fake_exception_msg),
        ):
            yield pods.decompose_machine(
                fake_driver.name, {}, pod_id=pod_id, name=pod_name
            )
Example #19
0
class TestRECSPowerDriver(MAASTestCase):
    """Tests for RECS|Box custom hardware."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def test_no_missing_packages(self):
        mock = self.patch(has_command_available)
        mock.return_value = True
        driver = RECSPowerDriver()
        missing = driver.detect_missing_packages()
        self.assertItemsEqual([], missing)

    def make_context(self):
        ip = factory.make_name("power_address")
        port = factory.pick_port()
        username = factory.make_name("power_user")
        password = factory.make_name("power_pass")
        node_id = factory.make_name("node_id")
        context = {
            "power_address": ip,
            "power_port": port,
            "power_user": username,
            "power_pass": password,
            "node_id": node_id,
        }
        return ip, port, username, password, node_id, context

    def test_extract_recs_parameters_extracts_parameters(self):
        ip, port, username, password, node_id, context = self.make_context()

        self.assertItemsEqual(
            (ip, port, username, password, node_id),
            extract_recs_parameters(context),
        )

    def test_power_off_calls_power_control_recs(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        power_control_recs_mock = self.patch(recs_power_driver,
                                             "power_control_recs")
        recs_power_driver.power_off(context["node_id"], context)

        self.assertThat(
            power_control_recs_mock,
            MockCalledOnceWith(ip, port, username, password, node_id, "off"),
        )

    def test_power_on_calls_power_control_recs(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        power_control_recs_mock = self.patch(recs_power_driver,
                                             "power_control_recs")
        set_boot_source_recs_mock = self.patch(recs_power_driver,
                                               "set_boot_source_recs")
        recs_power_driver.power_on(context["node_id"], context)

        self.assertThat(
            power_control_recs_mock,
            MockCalledOnceWith(ip, port, username, password, node_id, "on"),
        )
        self.assertThat(
            set_boot_source_recs_mock,
            MockCallsMatch(
                call(ip, port, username, password, node_id, "HDD", True),
                call(ip, port, username, password, node_id, "PXE", False),
            ),
        )

    def test_power_query_calls_power_state_recs(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        power_state_recs_mock = self.patch(recs_power_driver,
                                           "power_state_recs")
        recs_power_driver.power_query(context["node_id"], context)

        self.assertThat(
            power_state_recs_mock,
            MockCalledOnceWith(ip, port, username, password, node_id),
        )

    def test_extract_from_response_finds_element_content(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        response = dedent("""
            <node health="OK" id="RCU_84055620466592_BB_1_0" state="0" />
        """)
        attribute = "id"
        expected = "RCU_84055620466592_BB_1_0"
        output = api.extract_from_response(response, attribute)
        self.assertThat(output, Equals(expected))

    def test_get_gets_response(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        command = factory.make_string()
        params = [factory.make_string() for _ in range(3)]
        expected = dedent("""
            <node health="OK" id="RCU_84055620466592_BB_1_0" state="0" />
        """)
        response = StringIO(expected)
        self.patch(urllib.request, "urlopen", Mock(return_value=response))
        output = api.get(command, params)
        self.assertEqual(output, expected)

    def test_get_crashes_on_http_error(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        command = factory.make_string()
        mock_urlopen = self.patch(urllib.request, "urlopen")
        mock_urlopen.side_effect = urllib.error.HTTPError(
            None, None, None, None, None)
        self.assertRaises(PowerConnError, api.get, command, context)

    def test_get_crashes_on_url_error(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        command = factory.make_string()
        mock_urlopen = self.patch(urllib.request, "urlopen")
        mock_urlopen.side_effect = urllib.error.URLError("URL Error")
        self.assertRaises(PowerConnError, api.get, command, context)

    def test_post_gets_response(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        command = factory.make_string()
        params = {
            factory.make_string(): factory.make_string()
            for _ in range(3)
        }
        expected = dedent("""
            <node health="OK" id="RCU_84055620466592_BB_1_0" state="0" />
        """)
        response = StringIO(expected)
        self.patch(urllib.request, "urlopen", Mock(return_value=response))
        output = api.post(command, params=params)
        self.assertEqual(output, expected)

    def test_post_crashes_on_http_error(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        command = factory.make_string()
        mock_urlopen = self.patch(urllib.request, "urlopen")
        mock_urlopen.side_effect = urllib.error.HTTPError(
            None, None, None, None, None)
        self.assertRaises(PowerConnError, api.post, command, context)

    def test_post_crashes_on_url_error(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        command = factory.make_string()
        mock_urlopen = self.patch(urllib.request, "urlopen")
        mock_urlopen.side_effect = urllib.error.URLError("URL Error")
        self.assertRaises(PowerConnError, api.post, command, context)

    def test_put_gets_response(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        command = factory.make_string()
        params = {
            factory.make_string(): factory.make_string()
            for _ in range(3)
        }
        expected = dedent("""
            <node health="OK" id="RCU_84055620466592_BB_1_0" state="0" />
        """)
        response = StringIO(expected)
        self.patch(urllib.request, "urlopen", Mock(return_value=response))
        output = api.put(command, params=params)
        self.assertEqual(output, expected)

    def test_put_crashes_on_http_error(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        command = factory.make_string()
        mock_urlopen = self.patch(urllib.request, "urlopen")
        mock_urlopen.side_effect = urllib.error.HTTPError(
            None, None, None, None, None)
        self.assertRaises(PowerConnError, api.put, command, context)

    def test_put_crashes_on_url_error(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        command = factory.make_string()
        mock_urlopen = self.patch(urllib.request, "urlopen")
        mock_urlopen.side_effect = urllib.error.URLError("URL Error")
        self.assertRaises(PowerConnError, api.put, command, context)

    def test_get_node_power_state_returns_state(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        expected = dedent("""
            <node health="OK" id="RCU_84055620466592_BB_1_0" state="1" />
        """)
        response = StringIO(expected)
        self.patch(urllib.request, "urlopen", Mock(return_value=response))
        state = api.get_node_power_state("RCU_84055620466592_BB_1_0")
        self.assertEqual(state, "1")

    def test_set_boot_source_sets_device(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        boot_source = "2"
        boot_persistent = "false"
        params = {"source": boot_source, "persistent": boot_persistent}
        mock_put = self.patch(api, "put")
        api.set_boot_source(node_id, boot_source, boot_persistent)
        self.assertThat(
            mock_put,
            MockCalledOnceWith("node/%s/manage/set_bootsource" % node_id,
                               params=params),
        )

    def test_set_boot_source_recs_calls_set_boot_source(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        mock_set_boot_source = self.patch(RECSAPI, "set_boot_source")
        boot_source = "HDD"
        boot_persistent = "false"
        recs_power_driver.set_boot_source_recs(ip, port, username, password,
                                               node_id, boot_source,
                                               boot_persistent)
        self.assertThat(
            mock_set_boot_source,
            MockCalledOnceWith(node_id, boot_source, boot_persistent),
        )

    def test_get_nodes_gets_nodes(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        response = dedent("""
            <nodeList>
                <node architecture="x86" baseBoardId="RCU_84055620466592_BB_1"
                 health="OK" id="RCU_84055620466592_BB_1_0"
                 ipAddressMgmt="169.254.94.58"
                 macAddressMgmt="02:00:4c:4f:4f:50"
                 subnetMaskMgmt="255.255.0.0"
                />
                <node architecture="x86" baseBoardId="RCU_84055620466592_BB_2"
                 health="OK" id="RCU_84055620466592_BB_2_0"
                 ipAddressCompute="169.254.94.59"
                 macAddressCompute="02:00:4c:4f:4f:51"
                 subnetMaskCompute="255.255.0.0"
                />
                <node architecture="arm" baseBoardId="RCU_84055620466592_BB_3"
                 health="OK" id="RCU_84055620466592_BB_3_2"
                 ipAddressMgmt="169.254.94.60"
                 macAddressMgmt="02:00:4c:4f:4f:52"
                 subnetMaskMgmt="255.255.0.0"
                 ipAddressCompute="169.254.94.61"
                 macAddressCompute="02:00:4c:4f:4f:53"
                 subnetMaskCompute="255.255.0.0"
                />
                <node architecture="x86" baseBoardId="RCU_84055620466592_BB_4"
                 health="OK" id="RCU_84055620466592_BB_4_0"
                />
            </nodeList>
        """)
        mock_get = self.patch(api, "get", Mock(return_value=response))
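        # Nodes are keyed by id; management and compute MACs are both
        # collected, and nodes without any MAC address (BB_4) are omitted.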
        expected = {
            "RCU_84055620466592_BB_1_0": {
                "macs": ["02:00:4c:4f:4f:50"],
                "arch": "x86",
            },
            "RCU_84055620466592_BB_2_0": {
                "macs": ["02:00:4c:4f:4f:51"],
                "arch": "x86",
            },
            "RCU_84055620466592_BB_3_2": {
                "macs": ["02:00:4c:4f:4f:52", "02:00:4c:4f:4f:53"],
                "arch": "arm",
            },
        }
        output = api.get_nodes()

        self.expectThat(output, Equals(expected))
        self.expectThat(mock_get, MockCalledOnceWith("node"))

    def test_power_on_powers_on_node(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        mock_post = self.patch(api, "post")
        api.set_power_on_node(node_id)
        self.assertThat(
            mock_post, MockCalledOnceWith("node/%s/manage/power_on" % node_id))

    def test_power_off_powers_off_node(self):
        ip, port, username, password, node_id, context = self.make_context()
        api = RECSAPI(ip, port, username, password)
        mock_post = self.patch(api, "post")
        api.set_power_off_node(node_id)
        self.assertThat(
            mock_post,
            MockCalledOnceWith("node/%s/manage/power_off" % node_id))

    def test_power_state_recs_calls_get_node_power_state_on(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        mock_get_node_power_state = self.patch(RECSAPI, "get_node_power_state",
                                               Mock(return_value="1"))
        state = recs_power_driver.power_state_recs(ip, port, username,
                                                   password, node_id)
        self.assertThat(mock_get_node_power_state, MockCalledOnceWith(node_id))
        self.assertThat(state, Equals("on"))

    def test_power_state_recs_calls_get_node_power_state_off(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        mock_get_node_power_state = self.patch(RECSAPI, "get_node_power_state",
                                               Mock(return_value="0"))
        state = recs_power_driver.power_state_recs(ip, port, username,
                                                   password, node_id)
        self.assertThat(mock_get_node_power_state, MockCalledOnceWith(node_id))
        self.assertThat(state, Equals("off"))

    def test_power_state_recs_crashes_on_http_error(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        mock_get_node_power_state = self.patch(RECSAPI, "get_node_power_state",
                                               Mock(return_value="0"))
        mock_get_node_power_state.side_effect = urllib.error.HTTPError(
            None, None, None, None, None)
        self.assertRaises(
            RECSError,
            recs_power_driver.power_state_recs,
            ip,
            port,
            username,
            password,
            node_id,
        )

    def test_power_state_recs_crashes_on_url_error(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        mock_get_node_power_state = self.patch(RECSAPI, "get_node_power_state",
                                               Mock(return_value="0"))
        mock_get_node_power_state.side_effect = urllib.error.URLError(
            "URL Error")
        self.assertRaises(
            RECSError,
            recs_power_driver.power_state_recs,
            ip,
            port,
            username,
            password,
            node_id,
        )

    def test_power_control_recs_calls_set_power(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        mock_set_power = self.patch(RECSAPI, "_set_power")
        recs_power_driver.power_control_recs(ip, port, username, password,
                                             node_id, "on")
        recs_power_driver.power_control_recs(ip, port, username, password,
                                             node_id, "off")
        self.assertThat(
            mock_set_power,
            MockCallsMatch(call(node_id, "power_on"),
                           call(node_id, "power_off")),
        )

    def test_power_control_recs_crashes_on_invalid_action(self):
        ip, port, username, password, node_id, context = self.make_context()
        recs_power_driver = RECSPowerDriver()
        self.assertRaises(
            RECSError,
            recs_power_driver.power_control_recs,
            ip,
            port,
            username,
            password,
            node_id,
            factory.make_name("action"),
        )

    @inlineCallbacks
    def test_probe_and_enlist_recs_probes_and_enlists(self):
        user = factory.make_name("user")
        ip, port, username, password, node_id, context = self.make_context()
        domain = factory.make_name("domain")
        macs = [factory.make_mac_address() for _ in range(3)]
        mock_get_nodes = self.patch(RECSAPI, "get_nodes")
        mock_get_nodes.return_value = {
            node_id: {
                "macs": macs,
                "arch": "amd64"
            }
        }
        self.patch(RECSAPI, "set_boot_source")
        mock_create_node = self.patch(recs_module, "create_node")
        mock_create_node.side_effect = asynchronous(lambda *args: node_id)
        mock_commission_node = self.patch(recs_module, "commission_node")

        yield deferToThread(
            probe_and_enlist_recs,
            user,
            ip,
            int(port),
            username,
            password,
            True,
            domain,
        )

        self.expectThat(
            mock_create_node,
            MockCalledOnceWith(macs, "amd64", "recs_box", context, domain),
        )
        self.expectThat(mock_commission_node,
                        MockCalledOnceWith(node_id, user))

    @inlineCallbacks
    def test_probe_and_enlist_recs_probes_and_enlists_no_commission(self):
        user = factory.make_name("user")
        ip, port, username, password, node_id, context = self.make_context()
        domain = factory.make_name("domain")
        macs = [factory.make_mac_address() for _ in range(3)]
        mock_get_nodes = self.patch(RECSAPI, "get_nodes")
        mock_get_nodes.return_value = {node_id: {"macs": macs, "arch": "arm"}}
        self.patch(RECSAPI, "set_boot_source")
        mock_create_node = self.patch(recs_module, "create_node")
        mock_create_node.side_effect = asynchronous(lambda *args: node_id)
        mock_commission_node = self.patch(recs_module, "commission_node")

        yield deferToThread(
            probe_and_enlist_recs,
            user,
            ip,
            int(port),
            username,
            password,
            False,
            domain,
        )

        self.expectThat(
            mock_create_node,
            MockCalledOnceWith(macs, "armhf", "recs_box", context, domain),
        )
        self.expectThat(mock_commission_node, MockNotCalled())

    @inlineCallbacks
    def test_probe_and_enlist_recs_get_nodes_failure_http_error(self):
        user = factory.make_name("user")
        ip, port, username, password, node_id, context = self.make_context()
        domain = factory.make_name("domain")
        mock_get_nodes = self.patch(RECSAPI, "get_nodes")
        mock_get_nodes.side_effect = urllib.error.HTTPError(
            None, None, None, None, None)

        with ExpectedException(RECSError):
            yield deferToThread(
                probe_and_enlist_recs,
                user,
                ip,
                int(port),
                username,
                password,
                True,
                domain,
            )

    @inlineCallbacks
    def test_probe_and_enlist_recs_get_nodes_failure_url_error(self):
        user = factory.make_name("user")
        ip, port, username, password, node_id, context = self.make_context()
        domain = factory.make_name("domain")
        mock_get_nodes = self.patch(RECSAPI, "get_nodes")
        mock_get_nodes.side_effect = urllib.error.URLError("URL Error")

        with ExpectedException(RECSError):
            yield deferToThread(
                probe_and_enlist_recs,
                user,
                ip,
                int(port),
                username,
                password,
                True,
                domain,
            )
Example #20
0
class TestDiscoverPod(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    @inlineCallbacks
    def test_unknown_pod_raises_UnknownPodType(self):
        unknown_type = factory.make_name("unknown")
        with ExpectedException(exceptions.UnknownPodType):
            yield pods.discover_pod(unknown_type, {})

    @inlineCallbacks
    def test_handles_driver_not_returning_Deferred(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.discover.return_value = None
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                "bad pod driver '%s'; 'discover' did not "
                "return Deferred." % fake_driver.name
            ),
        ):
            yield pods.discover_pod(fake_driver.name, {})

    @inlineCallbacks
    def test_handles_driver_resolving_to_None(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.discover.return_value = succeed(None)
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape("unable to discover pod information."),
        ):
            yield pods.discover_pod(fake_driver.name, {})

    @inlineCallbacks
    def test_handles_driver_not_resolving_to_DiscoveredPod(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.discover.return_value = succeed({})
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape(
                "bad pod driver '%s'; 'discover' returned "
                "invalid result." % fake_driver.name
            ),
        ):
            yield pods.discover_pod(fake_driver.name, {})

    @inlineCallbacks
    def test_handles_driver_resolving_to_DiscoveredPod(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        discovered_pod = DiscoveredPod(
            architectures=["amd64/generic"],
            cores=random.randint(1, 8),
            cpu_speed=random.randint(1000, 3000),
            memory=random.randint(1024, 8192),
            local_storage=0,
            hints=DiscoveredPodHints(
                cores=random.randint(1, 8),
                cpu_speed=random.randint(1000, 2000),
                memory=random.randint(1024, 8192),
                local_storage=0,
            ),
            machines=[],
        )
        fake_driver.discover.return_value = succeed(discovered_pod)
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        result = yield pods.discover_pod(fake_driver.name, {})
        self.assertEqual({"pod": discovered_pod}, result)

    @inlineCallbacks
    def test_handles_driver_raising_NotImplementedError(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_driver.discover.return_value = fail(NotImplementedError())
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(NotImplementedError):
            yield pods.discover_pod(fake_driver.name, {})

    @inlineCallbacks
    def test_handles_driver_raising_any_Exception(self):
        fake_driver = MagicMock()
        fake_driver.name = factory.make_name("pod")
        fake_exception_type = factory.make_exception_type()
        fake_exception_msg = factory.make_name("error")
        fake_exception = fake_exception_type(fake_exception_msg)
        fake_driver.discover.return_value = fail(fake_exception)
        self.patch(PodDriverRegistry, "get_item").return_value = fake_driver
        with ExpectedException(
            exceptions.PodActionFail,
            re.escape("Failed talking to pod: " + fake_exception_msg),
        ):
            yield pods.discover_pod(fake_driver.name, {})
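
The TestDiscoverPod cases above describe a defensive wrapper around pod
drivers. The sketch below reproduces the checks they assert, using
simplified stand-ins (a plain dict registry and local exception classes),
so it is an illustration of the asserted behaviour rather than the real
pods.discover_pod:

from twisted.internet.defer import Deferred, inlineCallbacks, returnValue


class UnknownPodType(Exception):
    pass


class PodActionFail(Exception):
    pass


@inlineCallbacks
def discover_pod_sketch(registry, pod_type, context):
    driver = registry.get(pod_type)
    if driver is None:
        raise UnknownPodType(pod_type)
    d = driver.discover(context)
    if not isinstance(d, Deferred):
        raise PodActionFail(
            "bad pod driver '%s'; 'discover' did not return Deferred."
            % pod_type)
    try:
        pod = yield d
    except NotImplementedError:
        raise
    except Exception as error:
        raise PodActionFail("Failed talking to pod: %s" % error)
    if pod is None:
        raise PodActionFail("unable to discover pod information.")
    # The real code also rejects results that are not DiscoveredPod
    # instances; that check is omitted to keep the sketch short.
    returnValue({"pod": pod})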
Example #21
0
class TestRackNetworksMonitoringService(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(debug=False, timeout=5)

    @inlineCallbacks
    def test_runs_refresh_first_time(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(region.RequestRackRefresh)
        self.addCleanup((yield connecting))

        rpc_service = services.getServiceNamed('rpc')
        service = RackNetworksMonitoringService(rpc_service,
                                                Clock(),
                                                enable_monitoring=False)

        yield service.startService()
        yield service.stopService()

        self.assertThat(
            protocol.RequestRackRefresh,
            MockCalledOnceWith(protocol,
                               system_id=rpc_service.getClient().localIdent))

    @inlineCallbacks
    def test_reports_interfaces_to_region(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(region.UpdateInterfaces)
        self.addCleanup((yield connecting))

        interfaces = {
            "eth0": {
                "type": "physical",
                "mac_address": factory.make_mac_address(),
                "parents": [],
                "links": [],
                "enabled": True,
            }
        }

        rpc_service = services.getServiceNamed('rpc')
        service = RackNetworksMonitoringService(rpc_service,
                                                Clock(),
                                                enable_monitoring=False)
        service.getInterfaces = lambda: succeed(interfaces)
        # Put something in the cache. This tells recordInterfaces that refresh
        # has already run but the interfaces have changed, so they need to be
        # updated.
        service._recorded = {}

        service.startService()
        yield service.stopService()

        self.assertThat(
            protocol.UpdateInterfaces,
            MockCalledOnceWith(protocol,
                               system_id=rpc_service.getClient().localIdent,
                               interfaces=interfaces))

    @inlineCallbacks
    def test_reports_neighbours_to_region(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(region.UpdateInterfaces,
                                                     region.ReportNeighbours)
        self.addCleanup((yield connecting))
        rpc_service = services.getServiceNamed('rpc')
        service = RackNetworksMonitoringService(rpc_service,
                                                Clock(),
                                                enable_monitoring=False)
        neighbours = [{"ip": factory.make_ip_address()}]
        yield service.reportNeighbours(neighbours)
        self.assertThat(
            protocol.ReportNeighbours,
            MockCalledOnceWith(protocol,
                               system_id=rpc_service.getClient().localIdent,
                               neighbours=neighbours))

    @inlineCallbacks
    def test_reports_mdns_to_region(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(region.UpdateInterfaces,
                                                     region.ReportMDNSEntries)
        self.addCleanup((yield connecting))
        rpc_service = services.getServiceNamed('rpc')
        service = RackNetworksMonitoringService(rpc_service,
                                                Clock(),
                                                enable_monitoring=False)
        mdns = [{
            'interface': 'eth0',
            'hostname': 'boggle.example.com',
            'address': factory.make_ip_address(),
        }]
        yield service.reportMDNSEntries(mdns)
        self.assertThat(
            protocol.ReportMDNSEntries,
            MockCalledOnceWith(protocol,
                               system_id=rpc_service.getClient().localIdent,
                               mdns=mdns))

    @inlineCallbacks
    def test_asks_region_for_monitoring_state(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(region.UpdateInterfaces,
                                                     region.GetDiscoveryState)
        self.addCleanup((yield connecting))
        rpc_service = services.getServiceNamed('rpc')
        reactor = Clock()
        service = RackNetworksMonitoringService(rpc_service,
                                                reactor,
                                                enable_monitoring=False)
        protocol.GetDiscoveryState.return_value = {'interfaces': {}}
        # Put something in the cache. This tells recordInterfaces that refresh
        # has already run but the interfaces have changed, so they need to be
        # updated.
        service._recorded = {}
        yield service.startService()
        yield maybeDeferred(service.getDiscoveryState)
        yield service.stopService()
        self.assertThat(
            protocol.GetDiscoveryState,
            MockCalledOnceWith(protocol,
                               system_id=rpc_service.getClient().localIdent))
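
These tests all follow the same rack-to-region reporting pattern: fetch the
RPC client, then invoke the region command with the rack's own system_id.
A hedged sketch of that shape, with the command class and client passed in
because the exact MAAS internals are not shown here:

def report_neighbours(rpc_service, ReportNeighbours, neighbours):
    # Illustrative only: the client is assumed to be callable with an AMP
    # command class plus keyword arguments, and to identify this rack
    # controller via its localIdent attribute, as the assertions above show.
    client = rpc_service.getClient()
    return client(
        ReportNeighbours,
        system_id=client.localIdent,
        neighbours=neighbours,
    )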
Example #22
0
class TestRedfishPowerDriver(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def test_missing_packages(self):
        # There is nothing to check for; just confirm it returns [].
        driver = RedfishPowerDriver()
        missing = driver.detect_missing_packages()
        self.assertItemsEqual([], missing)

    def test_get_url_with_ip(self):
        driver = RedfishPowerDriver()
        context = make_context()
        ip = context.get("power_address").encode("utf-8")
        expected_url = b"https://%s" % ip
        url = driver.get_url(context)
        self.assertEqual(expected_url, url)

    def test_get_url_with_https(self):
        driver = RedfishPowerDriver()
        context = make_context()
        context["power_address"] = join("https://", context["power_address"])
        expected_url = context.get("power_address").encode("utf-8")
        url = driver.get_url(context)
        self.assertEqual(expected_url, url)

    def test_get_url_with_http(self):
        driver = RedfishPowerDriver()
        context = make_context()
        context["power_address"] = join("http://", context["power_address"])
        expected_url = context.get("power_address").encode("utf-8")
        url = driver.get_url(context)
        self.assertEqual(expected_url, url)

    def test__make_auth_headers(self):
        power_user = factory.make_name("power_user")
        power_pass = factory.make_name("power_pass")
        creds = "%s:%s" % (power_user, power_pass)
        authorization = b64encode(creds.encode("utf-8"))
        attributes = {
            b"User-Agent": [b"MAAS"],
            b"Accept": [b"application/json"],
            b"Authorization": [b"Basic " + authorization],
            b"Content-Type": [b"application/json; charset=utf-8"],
        }
        driver = RedfishPowerDriver()
        headers = driver.make_auth_headers(power_user, power_pass)
        self.assertEqual(headers, Headers(attributes))

    @inlineCallbacks
    def test_get_node_id_trailing_slash(self):
        driver = RedfishPowerDriver()
        url = driver.get_url(make_context())
        mock_agent = self.patch(redfish_module, "Agent")
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.OK
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, "readBody")
        mock_readBody.return_value = succeed(
            json.dumps({
                "Members": [{
                    "@odata.id": "/redfish/v1/Systems/1/"
                }]
            }).encode("utf-8"))

        node_id = yield driver.get_node_id(url, {})
        self.assertEquals(b"1", node_id)

    @inlineCallbacks
    def test_get_node_id_no_trailing_slash(self):
        driver = RedfishPowerDriver()
        url = driver.get_url(make_context())
        mock_agent = self.patch(redfish_module, "Agent")
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.OK
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, "readBody")
        mock_readBody.return_value = succeed(
            json.dumps({
                "Members": [{
                    "@odata.id": "/redfish/v1/Systems/1"
                }]
            }).encode("utf-8"))

        node_id = yield driver.get_node_id(url, {})
        self.assertEquals(b"1", node_id)

    @inlineCallbacks
    def test_redfish_request_renders_response(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, "Agent")
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.OK
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, "readBody")
        mock_readBody.return_value = succeed(
            json.dumps(SAMPLE_JSON_SYSTEMS).encode("utf-8"))
        expected_response = SAMPLE_JSON_SYSTEMS

        response, headers = yield driver.redfish_request(b"GET", uri, headers)
        self.assertEqual(expected_response, response)
        self.assertEqual(expected_headers.headers, headers)

    @inlineCallbacks
    def test_wrap_redfish_request_retries_404s_trailing_slash(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, "Agent")
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.NOT_FOUND
        expected_headers.headers = "Testing Headers"
        happy_headers = Mock()
        happy_headers.code = HTTPStatus.OK
        happy_headers.headers = "Testing Headers"
        mock_agent.return_value.request.side_effect = [
            succeed(expected_headers),
            succeed(happy_headers),
        ]
        mock_readBody = self.patch(redfish_module, "readBody")
        mock_readBody.return_value = succeed(
            json.dumps(SAMPLE_JSON_SYSTEMS).encode("utf-8"))
        expected_response = SAMPLE_JSON_SYSTEMS

        response, return_headers = yield driver.redfish_request(
            b"GET", uri, headers)
        self.assertThat(
            mock_agent.return_value.request,
            MockCallsMatch(
                call(b"GET", uri, headers, None),
                call(b"GET", uri + "/".encode("utf-8"), headers, None),
            ),
        )
        self.assertEqual(expected_response, response)
        self.assertEqual(expected_headers.headers, return_headers)

    @inlineCallbacks
    def test_redfish_request_raises_invalid_json_error(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, "Agent")
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.OK
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, "readBody")
        mock_readBody.return_value = succeed(
            '{"invalid": "json"'.encode("utf-8"))
        with ExpectedException(PowerActionError):
            yield driver.redfish_request(b"GET", uri, headers)

    @inlineCallbacks
    def test_redfish_request_continues_partial_download_error(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, "Agent")
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.OK
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, "readBody")
        error = PartialDownloadError(
            response=json.dumps(SAMPLE_JSON_SYSTEMS).encode("utf-8"),
            code=HTTPStatus.OK,
        )
        mock_readBody.return_value = fail(error)
        expected_response = SAMPLE_JSON_SYSTEMS

        response, headers = yield driver.redfish_request(b"GET", uri, headers)
        self.assertEqual(expected_response, response)
        self.assertEqual(expected_headers.headers, headers)

    @inlineCallbacks
    def test_redfish_request_raises_failures(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, "Agent")
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.OK
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, "readBody")
        error = PartialDownloadError(
            response=json.dumps(SAMPLE_JSON_SYSTEMS).encode("utf-8"),
            code=HTTPStatus.NOT_FOUND,
        )
        mock_readBody.return_value = fail(error)

        with ExpectedException(PartialDownloadError):
            yield driver.redfish_request(b"GET", uri, headers)
        self.assertThat(mock_readBody, MockCalledOnceWith(expected_headers))

    @inlineCallbacks
    def test_redfish_request_raises_error_on_response_code_above_400(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        uri = join(url, b"redfish/v1/Systems")
        headers = driver.make_auth_headers(**context)
        mock_agent = self.patch(redfish_module, "Agent")
        mock_agent.return_value.request = Mock()
        expected_headers = Mock()
        expected_headers.code = HTTPStatus.BAD_REQUEST
        expected_headers.headers = "Testing Headers"
        mock_agent.return_value.request.return_value = succeed(
            expected_headers)
        mock_readBody = self.patch(redfish_module, "readBody")

        with ExpectedException(PowerActionError):
            yield driver.redfish_request(b"GET", uri, headers)
        self.assertThat(mock_readBody, MockNotCalled())

    @inlineCallbacks
    def test_power_issues_power_reset(self):
        driver = RedfishPowerDriver()
        context = make_context()
        power_change = factory.make_name("power_change")
        url = driver.get_url(context)
        headers = driver.make_auth_headers(**context)
        node_id = b"1"
        mock_file_body_producer = self.patch(redfish_module,
                                             "FileBodyProducer")
        payload = FileBodyProducer(
            BytesIO(
                json.dumps({
                    "ResetType": "%s" % power_change
                }).encode("utf-8")))
        mock_file_body_producer.return_value = payload
        mock_redfish_request = self.patch(driver, "redfish_request")
        expected_uri = join(url, REDFISH_POWER_CONTROL_ENDPOINT % node_id)
        yield driver.power(power_change, url, node_id, headers)
        self.assertThat(
            mock_redfish_request,
            MockCalledOnceWith(b"POST", expected_uri, headers, payload),
        )

    @inlineCallbacks
    def test__set_pxe_boot(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        node_id = b"1"
        headers = driver.make_auth_headers(**context)
        mock_file_body_producer = self.patch(redfish_module,
                                             "FileBodyProducer")
        payload = FileBodyProducer(
            BytesIO(
                json.dumps({
                    "Boot": {
                        "BootSourceOverrideEnabled": "Once",
                        "BootSourceOverrideTarget": "Pxe",
                    }
                }).encode("utf-8")))
        mock_file_body_producer.return_value = payload
        mock_redfish_request = self.patch(driver, "redfish_request")

        yield driver.set_pxe_boot(url, node_id, headers)
        self.assertThat(
            mock_redfish_request,
            MockCalledOnceWith(
                b"PATCH",
                join(url, b"redfish/v1/Systems/%s" % node_id),
                headers,
                payload,
            ),
        )

    @inlineCallbacks
    def test__power_on(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        headers = driver.make_auth_headers(**context)
        node_id = b"1"
        mock_redfish_request = self.patch(driver, "redfish_request")
        mock_redfish_request.return_value = (SAMPLE_JSON_SYSTEMS, None)
        mock_set_pxe_boot = self.patch(driver, "set_pxe_boot")
        mock_power_query = self.patch(driver, "power_query")
        mock_power_query.return_value = "on"
        mock_power = self.patch(driver, "power")

        yield driver.power_on(node_id, context)
        self.assertThat(mock_set_pxe_boot,
                        MockCalledOnceWith(url, node_id, headers))
        self.assertThat(mock_power_query, MockCalledOnceWith(node_id, context))
        self.assertThat(
            mock_power,
            MockCallsMatch(
                call("ForceOff", url, node_id, headers),
                call("On", url, node_id, headers),
            ),
        )

    @inlineCallbacks
    def test__power_off(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        headers = driver.make_auth_headers(**context)
        node_id = b"1"
        mock_redfish_request = self.patch(driver, "redfish_request")
        mock_redfish_request.return_value = (SAMPLE_JSON_SYSTEMS, None)
        mock_set_pxe_boot = self.patch(driver, "set_pxe_boot")
        mock_power_query = self.patch(driver, "power_query")
        mock_power_query.return_value = "on"
        mock_power = self.patch(driver, "power")

        yield driver.power_off(node_id, context)
        self.assertThat(mock_set_pxe_boot,
                        MockCalledOnceWith(url, node_id, headers))
        self.assertThat(mock_power,
                        MockCalledOnceWith("ForceOff", url, node_id, headers))

    @inlineCallbacks
    def test__power_off_already_off(self):
        driver = RedfishPowerDriver()
        context = make_context()
        url = driver.get_url(context)
        headers = driver.make_auth_headers(**context)
        node_id = b"1"
        mock_redfish_request = self.patch(driver, "redfish_request")
        mock_redfish_request.return_value = (SAMPLE_JSON_SYSTEMS, None)
        mock_set_pxe_boot = self.patch(driver, "set_pxe_boot")
        mock_power_query = self.patch(driver, "power_query")
        mock_power_query.return_value = "off"
        mock_power = self.patch(driver, "power")

        yield driver.power_off(node_id, context)
        self.assertThat(mock_set_pxe_boot,
                        MockCalledOnceWith(url, node_id, headers))
        self.assertThat(mock_power, MockNotCalled())

    @inlineCallbacks
    def test_power_query_queries_on(self):
        driver = RedfishPowerDriver()
        power_change = "On"
        system_id = factory.make_name("system_id")
        context = make_context()
        mock_redfish_request = self.patch(driver, "redfish_request")
        NODE_POWERED_ON = deepcopy(SAMPLE_JSON_SYSTEM)
        NODE_POWERED_ON["PowerState"] = "On"
        mock_redfish_request.side_effect = [
            (SAMPLE_JSON_SYSTEMS, None),
            (NODE_POWERED_ON, None),
        ]
        power_state = yield driver.power_query(system_id, context)
        self.assertEqual(power_state, power_change.lower())

    @inlineCallbacks
    def test_power_query_queries_off(self):
        driver = RedfishPowerDriver()
        power_change = "Off"
        system_id = factory.make_name("system_id")
        context = make_context()
        mock_redfish_request = self.patch(driver, "redfish_request")
        mock_redfish_request.side_effect = [
            (SAMPLE_JSON_SYSTEMS, None),
            (SAMPLE_JSON_SYSTEM, None),
        ]
        power_state = yield driver.power_query(system_id, context)
        self.assertEqual(power_state, power_change.lower())
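
test_wrap_redfish_request_retries_404s_trailing_slash above captures a small
but important detail: a 404 response is retried once with a trailing slash
appended to the URI. A minimal sketch of that retry, with do_request standing
in for the Twisted Agent call used by the driver:

from http import HTTPStatus

from twisted.internet.defer import inlineCallbacks, returnValue


@inlineCallbacks
def request_with_trailing_slash_retry(do_request, method, uri, headers):
    # do_request is assumed to return a Deferred firing with a response
    # object exposing a .code attribute, as the mocked Agent does above.
    response = yield do_request(method, uri, headers, None)
    if response.code == HTTPStatus.NOT_FOUND and not uri.endswith(b"/"):
        response = yield do_request(method, uri + b"/", headers, None)
    returnValue(response)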
Example #23
0
class TestWindowsPXEBootMethod(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def setUp(self):
        self.patch(windows_module, "get_hivex_module")
        super(TestWindowsPXEBootMethod, self).setUp()

    def test_clean_path(self):
        method = WindowsPXEBootMethod()
        parts = [factory.make_string() for _ in range(3)]
        dirty_path = "\\".join(parts)
        valid_path = dirty_path.lower().replace("\\", "/")
        clean_path = method.clean_path(dirty_path)
        self.assertEqual(valid_path, clean_path)

    def test_clean_path_strip_boot(self):
        method = WindowsPXEBootMethod()
        dirty_path = "\\Boot\\BCD"
        clean_path = method.clean_path(dirty_path)
        self.assertEqual("bcd", clean_path)

    def test_get_node_info(self):
        method = WindowsPXEBootMethod()
        mac = factory.make_mac_address()
        self.patch(windows_module, "get_remote_mac").return_value = mac
        mock_request_node_info = self.patch(
            windows_module, "request_node_info_by_mac_address")
        method.get_node_info()
        self.assertThat(mock_request_node_info, MockCalledOnceWith(mac))

    @inlineCallbacks
    def test_match_path_pxelinux(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = {
            "purpose": "install",
            "osystem": "windows",
            "mac": mock_mac,
        }

        params = yield method.match_path(None, "pxelinux.0")
        self.assertEqual(mock_mac, params["mac"])
        self.assertEqual(method.bootloader_path, params["path"])

    @inlineCallbacks
    def test_match_path_pxelinux_only_on_install(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = {
            "purpose": factory.make_string(),
            "osystem": "windows",
            "mac": mock_mac,
        }

        params = yield method.match_path(None, "pxelinux.0")
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_pxelinux_missing_hivex(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = {
            "purpose": factory.make_string(),
            "osystem": "windows",
            "mac": mock_mac,
        }

        self.patch(windows_module, "HAVE_HIVEX")
        params = yield method.match_path(None, "pxelinux.0")
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_pxelinux_only_on_windows(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = {
            "purpose": "install",
            "osystem": factory.make_string(),
            "mac": mock_mac,
        }

        params = yield method.match_path(None, "pxelinux.0")
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_pxelinux_get_node_info_None(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = None

        params = yield method.match_path(None, "pxelinux.0")
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_lpxelinux(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = {
            "purpose": "install",
            "osystem": "windows",
            "mac": mock_mac,
        }

        params = yield method.match_path(None, "lpxelinux.0")
        self.assertEqual(mock_mac, params["mac"])
        self.assertEqual(method.bootloader_path, params["path"])

    @inlineCallbacks
    def test_match_path_lpxelinux_only_on_install(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = {
            "purpose": factory.make_string(),
            "osystem": "windows",
            "mac": mock_mac,
        }

        params = yield method.match_path(None, "lpxelinux.0")
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_lpxelinux_missing_hivex(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = {
            "purpose": factory.make_string(),
            "osystem": "windows",
            "mac": mock_mac,
        }

        self.patch(windows_module, "HAVE_HIVEX")
        params = yield method.match_path(None, "lpxelinux.0")
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_lpxelinux_only_on_windows(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = {
            "purpose": "install",
            "osystem": factory.make_string(),
            "mac": mock_mac,
        }

        params = yield method.match_path(None, "lpxelinux.0")
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_lpxelinux_get_node_info_None(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_get_node_info = self.patch(method, "get_node_info")
        mock_get_node_info.return_value = None

        params = yield method.match_path(None, "lpxelinux.0")
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_static_file(self):
        method = WindowsPXEBootMethod()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(windows_module, "get_remote_mac")
        mock_get_node_info.return_value = mock_mac

        params = yield method.match_path(None, "bootmgr.exe")
        self.assertEqual(mock_mac, params["mac"])
        self.assertEqual("bootmgr.exe", params["path"])

    @inlineCallbacks
    def test_match_path_static_file_clean_path(self):
        method = WindowsPXEBootMethod()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(windows_module, "get_remote_mac")
        mock_get_node_info.return_value = mock_mac

        params = yield method.match_path(None, "\\Boot\\BCD")
        self.assertEqual(mock_mac, params["mac"])
        self.assertEqual("bcd", params["path"])

    def test_get_reader_bcd(self):
        method = WindowsPXEBootMethod()
        mock_compose_bcd = self.patch(method, "compose_bcd")
        local_host = factory.make_ipv4_address()
        kernel_params = make_kernel_parameters(osystem="windows")

        method.get_reader(None,
                          kernel_params,
                          path="bcd",
                          local_host=local_host)
        self.assertThat(mock_compose_bcd,
                        MockCalledOnceWith(kernel_params, local_host))

    def test_get_reader_static_file(self):
        method = WindowsPXEBootMethod()
        mock_path = factory.make_name("path")
        mock_output_static = self.patch(method, "output_static")
        kernel_params = make_kernel_parameters(osystem="windows")

        method.get_reader(None, kernel_params, path=mock_path)
        self.assertThat(mock_output_static,
                        MockCalledOnceWith(kernel_params, mock_path))

    def test_compose_preseed_url(self):
        url = "http://localhost/MAAS"
        expected = "http:\\\\localhost\\^M^A^A^S"
        method = WindowsPXEBootMethod()
        output = method.compose_preseed_url(url)
        self.assertEqual(expected, output)

    def test_compose_bcd(self):
        method = WindowsPXEBootMethod()
        local_host = factory.make_ipv4_address()
        kernel_params = make_kernel_parameters()

        fake_output = factory.make_string().encode("utf-8")
        self.patch(os.path, "isfile").return_value = True
        self.patch(shutil, "copyfile")
        self.patch(windows_module, "Bcd")

        # https://bugs.python.org/issue23004 -- mock_open() should allow
        # reading binary data -- prevents the use of mock_open() here.
        self.patch(windows_module, "open")
        windows_module.open.return_value = io.BytesIO(fake_output)
        output = method.compose_bcd(kernel_params, local_host)
        self.assertThat(windows_module.open, MockCalledOnceWith(ANY, "rb"))

        self.assertTrue(isinstance(output, BytesReader))
        self.assertEqual(fake_output, output.read(-1))

    def test_compose_bcd_missing_template(self):
        method = WindowsPXEBootMethod()
        self.patch(method, "get_resource_path").return_value = ""
        local_host = factory.make_ipv4_address()
        kernel_params = make_kernel_parameters()

        self.assertRaises(BootMethodError, method.compose_bcd, kernel_params,
                          local_host)

    def test_get_resource_path(self):
        fake_tftproot = self.make_dir()
        self.useFixture(ClusterConfigurationFixture(tftp_root=fake_tftproot))
        method = WindowsPXEBootMethod()
        fake_path = factory.make_name("path")
        fake_kernelparams = make_kernel_parameters()
        result = method.get_resource_path(fake_kernelparams, fake_path)
        expected = os.path.join(
            fake_tftproot,
            "windows",
            fake_kernelparams.arch,
            fake_kernelparams.subarch,
            fake_kernelparams.release,
            fake_kernelparams.label,
            fake_path,
        )
        self.assertEqual(expected, result)

    def test_output_static(self):
        method = WindowsPXEBootMethod()
        contents = factory.make_bytes()
        temp_dir = self.make_dir()
        filename = factory.make_file(temp_dir, "resource", contents=contents)
        self.patch(method, "get_resource_path").return_value = filename
        result = method.output_static(None, None)
        self.assertIsInstance(result, FilesystemReader)
        self.assertEqual(contents, result.read(10000))
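
test_clean_path and test_clean_path_strip_boot above fully determine the
normalisation rule: lower-case the path, turn backslashes into forward
slashes, and drop a leading boot/ component. A small standalone sketch of
that rule (not the MAAS implementation itself):

def clean_windows_tftp_path(path):
    # Normalise a Windows-style TFTP request path the way the tests above
    # expect: "\\Boot\\BCD" becomes "bcd".
    path = path.lower().replace("\\", "/")
    if path.startswith("/"):
        path = path[1:]
    if path.startswith("boot/"):
        path = path[len("boot/"):]
    return path


assert clean_windows_tftp_path("\\Boot\\BCD") == "bcd"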
Example #24
0
class TestRackExternalService(MAASTestCase):
    """Tests for `RackExternalService`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5000)

    def setUp(self):
        super().setUp()
        self.patch(
            clusterservice, "get_all_interfaces_definition"
        ).return_value = {}

    def test_service_uses__tryUpdate_as_periodic_function(self):
        service = external.RackExternalService(
            StubClusterClientService(), reactor
        )
        self.assertThat(service.call, Equals((service._tryUpdate, (), {})))

    def test_service_iterates_on_low_interval(self):
        service = external.RackExternalService(
            StubClusterClientService(), reactor
        )
        self.assertThat(service.step, Equals(service.INTERVAL_LOW))

    @inlineCallbacks
    def test_getConfiguration_updates_interval_to_high(self):
        rpc_service, protocol = yield prepareRegion(self)
        service = make_startable_RackExternalService(
            self, rpc_service, reactor, []
        )

        yield service.startService()
        self.addCleanup((yield service.stopService))

        yield service._orig_tryUpdate()

        self.assertThat(service.step, Equals(service.INTERVAL_HIGH))
        self.assertThat(service._loop.interval, Equals(service.INTERVAL_HIGH))

    @inlineCallbacks
    def test_is_silent_and_does_nothing_when_region_is_not_available(self):
        # Patch the logger in the clusterservice so no log messages are printed
        # because the tests run in debug mode.
        self.patch(common.log, "debug")
        self.useFixture(MAASRootFixture())
        ntp = external.RackNTP()
        service = make_startable_RackExternalService(
            self, StubClusterClientService(), reactor, [("NTP", ntp)]
        )
        self.patch_autospec(ntp, "_tryUpdate")

        yield service.startService()
        self.addCleanup((yield service.stopService))

        with TwistedLoggerFixture() as logger:
            yield service._tryUpdate()

        self.assertThat(logger.output, Equals(""))
        self.assertThat(ntp._tryUpdate, MockNotCalled())

    @inlineCallbacks
    def test_is_silent_and_does_nothing_when_rack_is_not_recognised(self):
        # Patch the logger in the clusterservice so no log messages are printed
        # because the tests run in debug mode.
        self.patch(common.log, "debug")
        self.useFixture(MAASRootFixture())
        rpc_service, protocol = yield prepareRegion(self)
        protocol.GetControllerType.side_effect = exceptions.NoSuchNode
        ntp = external.RackNTP()
        service = make_startable_RackExternalService(
            self, StubClusterClientService(), reactor, [("NTP", ntp)]
        )
        self.patch_autospec(ntp, "_tryUpdate")

        yield service.startService()
        self.addCleanup((yield service.stopService))

        with TwistedLoggerFixture() as logger:
            yield service._tryUpdate()

        self.assertThat(logger.output, Equals(""))
        self.assertThat(ntp._tryUpdate, MockNotCalled())
Example #25
0
class TestImportBootImages(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    @defer.inlineCallbacks
    def test_add_to_waiting_if_lock_already_held(self):
        yield concurrency.boot_images.acquire()
        deferToThread = self.patch(boot_images, "deferToThread")
        deferToThread.return_value = defer.succeed(None)
        maas_url = factory.make_simple_http_url()
        d = import_boot_images(sentinel.sources, maas_url)
        self.assertEqual(1, len(concurrency.boot_images.waiting))
        concurrency.boot_images.release()
        yield d
        self.assertThat(
            deferToThread,
            MockCalledOnceWith(
                _run_import,
                sentinel.sources,
                maas_url,
                http_proxy=None,
                https_proxy=None,
            ),
        )

    @defer.inlineCallbacks
    def test_never_more_than_one_waiting(self):
        yield concurrency.boot_images.acquire()
        deferToThread = self.patch(boot_images, "deferToThread")
        deferToThread.return_value = defer.succeed(None)
        maas_url = factory.make_simple_http_url()
        d = import_boot_images(sentinel.sources, maas_url)
        self.assertIsNone(import_boot_images(sentinel.sources, maas_url))
        self.assertEqual(1, len(concurrency.boot_images.waiting))
        concurrency.boot_images.release()
        yield d
        self.assertThat(
            deferToThread,
            MockCalledOnceWith(
                _run_import,
                sentinel.sources,
                maas_url,
                http_proxy=None,
                https_proxy=None,
            ),
        )

    def test_takes_lock_when_running(self):
        clock = Clock()
        deferToThread = self.patch(boot_images, "deferToThread")
        deferToThread.return_value = pause(1, clock)

        # Lock is acquired when import is started.
        import_boot_images(sentinel.sources, factory.make_simple_http_url())
        self.assertTrue(concurrency.boot_images.locked)

        # Lock is released once the download is done.
        clock.advance(1)
        self.assertFalse(concurrency.boot_images.locked)

    @inlineCallbacks
    def test_update_last_image_sync(self):
        get_maas_id = self.patch(boot_images, "get_maas_id")
        get_maas_id.return_value = factory.make_string()
        getRegionClient = self.patch(boot_images, "getRegionClient")
        _run_import = self.patch_autospec(boot_images, "_run_import")
        _run_import.return_value = True
        maas_url = factory.make_simple_http_url()
        yield boot_images._import_boot_images(sentinel.sources, maas_url)
        self.assertThat(
            _run_import,
            MockCalledOnceWith(sentinel.sources, maas_url, None, None),
        )
        self.assertThat(getRegionClient, MockCalledOnceWith())
        self.assertThat(get_maas_id, MockCalledOnceWith())
        client = getRegionClient.return_value
        self.assertThat(
            client,
            MockCalledOnceWith(UpdateLastImageSync, system_id=get_maas_id()),
        )

    @inlineCallbacks
    def test_update_last_image_sync_always_updated(self):
        get_maas_id = self.patch(boot_images, "get_maas_id")
        get_maas_id.return_value = factory.make_string()
        getRegionClient = self.patch(boot_images, "getRegionClient")
        _run_import = self.patch_autospec(boot_images, "_run_import")
        _run_import.return_value = False
        maas_url = factory.make_simple_http_url()
        yield boot_images._import_boot_images(sentinel.sources, maas_url)
        self.assertThat(
            _run_import,
            MockCalledOnceWith(sentinel.sources, maas_url, None, None),
        )
        self.assertThat(getRegionClient, MockCalledOnceWith())
        self.assertThat(get_maas_id, MockCalledOnceWith())
        client = getRegionClient.return_value
        self.assertThat(
            client,
            MockCalledOnceWith(UpdateLastImageSync, system_id=get_maas_id()),
        )

    @inlineCallbacks
    def test_update_last_image_sync_end_to_end(self):
        get_maas_id = self.patch(boot_images, "get_maas_id")
        get_maas_id.return_value = factory.make_string()
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(
            region.UpdateLastImageSync)
        protocol.UpdateLastImageSync.return_value = succeed({})
        self.addCleanup((yield connecting))
        self.patch_autospec(boot_resources, "import_images")
        boot_resources.import_images.return_value = True
        sources, hosts = make_sources()
        maas_url = factory.make_simple_http_url()
        yield boot_images.import_boot_images(sources, maas_url)
        self.assertThat(
            boot_resources.import_images,
            MockCalledOnceWith(fix_sources_for_cluster(sources, maas_url)),
        )
        self.assertThat(
            protocol.UpdateLastImageSync,
            MockCalledOnceWith(protocol, system_id=get_maas_id()),
        )

    @inlineCallbacks
    def test_update_last_image_sync_end_to_end_import_not_performed(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(
            region.UpdateLastImageSync)
        protocol.UpdateLastImageSync.return_value = succeed({})
        self.addCleanup((yield connecting))
        self.patch_autospec(boot_resources, "import_images")
        boot_resources.import_images.return_value = False
        sources, hosts = make_sources()
        maas_url = factory.make_simple_http_url()
        yield boot_images.import_boot_images(sources, maas_url)
        self.assertThat(
            boot_resources.import_images,
            MockCalledOnceWith(fix_sources_for_cluster(sources, maas_url)),
        )
        self.assertThat(protocol.UpdateLastImageSync, MockNotCalled())
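
The first three TestImportBootImages cases above describe a simple
concurrency policy: imports are serialised behind a lock, at most one extra
import may queue up, and further requests made while one is already waiting
return None. A rough sketch of that policy using a Twisted DeferredLock
(names are illustrative, not the real boot_images module):

from twisted.internet.defer import DeferredLock

_import_lock = DeferredLock()


def import_once(run_import, *args, **kwargs):
    # run_import is a hypothetical callable that performs the download.
    if len(_import_lock.waiting) > 0:
        # Something is already queued behind the running import; drop this
        # request, mirroring test_never_more_than_one_waiting.
        return None
    return _import_lock.run(run_import, *args, **kwargs)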
Example #26
0
class TestRackNTP(MAASTestCase):
    """Tests for `RackNTP` in `RackExternalService`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5000)

    def setUp(self):
        super().setUp()
        self.patch(
            clusterservice, "get_all_interfaces_definition"
        ).return_value = {}

    def make_RackNTP_ExternalService(self, rpc_service, reactor):
        ntp = external.RackNTP()
        service = make_startable_RackExternalService(
            self, rpc_service, reactor, [("NTP", ntp)]
        )
        return service, ntp

    def make_servers_and_peers(self):
        return (
            frozenset(
                {
                    factory.make_ipv4_address(),
                    factory.make_ipv6_address(),
                    factory.make_hostname(),
                }
            ),
            frozenset(
                {factory.make_ipv4_address(), factory.make_ipv6_address()}
            ),
        )

    @inlineCallbacks
    def test_getConfiguration_returns_configuration_object(self):
        is_region, is_rack = factory.pick_bool(), factory.pick_bool()
        servers, peers = self.make_servers_and_peers()
        rpc_service, protocol = yield prepareRegion(
            self,
            is_region=is_region,
            is_rack=is_rack,
            servers=servers,
            peers=peers,
        )
        service, ntp = self.make_RackNTP_ExternalService(rpc_service, reactor)
        config = yield service._getConfiguration()
        observed = ntp._getConfiguration(
            config.controller_type, config.time_configuration
        )

        self.assertThat(observed, IsInstance(external._NTPConfiguration))
        self.assertThat(
            observed,
            MatchesStructure.byEquality(
                references=servers,
                peers=peers,
                is_region=is_region,
                is_rack=is_rack,
            ),
        )

    @inlineCallbacks
    def test_tryUpdate_updates_ntp_server(self):
        self.useFixture(MAASRootFixture())
        servers, peers = self.make_servers_and_peers()
        rpc_service, _ = yield prepareRegion(
            self, servers=servers, peers=peers
        )
        service, ntp = self.make_RackNTP_ExternalService(rpc_service, reactor)
        configure_rack = self.patch_autospec(external, "configure_rack")
        restartService = self.patch_autospec(service_monitor, "restartService")

        config = yield service._getConfiguration()
        yield ntp._tryUpdate(config)
        self.assertThat(configure_rack, MockCalledOnceWith(servers, peers))
        self.assertThat(restartService, MockCalledOnceWith("ntp_rack"))
        # If the configuration has not changed then a second call to
        # `_tryUpdate` does not result in another call to `configure_rack`.
        yield ntp._tryUpdate(config)
        self.assertThat(configure_rack, MockCalledOnceWith(servers, peers))
        self.assertThat(restartService, MockCalledOnceWith("ntp_rack"))
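        # A minimal sketch of how that skip can work (an assumption for
        # illustration, not the actual `RackNTP` code): compare the new
        # configuration with the last one applied and only reconfigure on a
        # change, e.g.
        #
        #     if config != self._configuration:
        #         yield self._configure(config)
        #         self._configuration = config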

    @inlineCallbacks
    def test_is_silent_does_nothing_but_saves_config_when_is_region(self):
        # Patch the logger in the clusterservice so no log messages are printed
        # because the tests run in debug mode.
        self.patch(common.log, "debug")
        self.useFixture(MAASRootFixture())
        rpc_service, _ = yield prepareRegion(self, is_region=True)
        service, ntp = self.make_RackNTP_ExternalService(rpc_service, reactor)
        self.patch_autospec(external, "configure_rack")  # No-op configuration.

        # There is no most recently applied configuration.
        self.assertThat(ntp._configuration, Is(None))

        yield service.startService()
        self.addCleanup((yield service.stopService))

        with TwistedLoggerFixture() as logger:
            yield service._orig_tryUpdate()

        # The most recently applied configuration is set, though it was not
        # actually "applied" because this host was configured as a region+rack
        # controller, and the rack should not attempt to manage the NTP server
        # on a region+rack.
        self.assertThat(
            ntp._configuration, IsInstance(external._NTPConfiguration)
        )
        # The configuration was not applied.
        self.assertThat(external.configure_rack, MockNotCalled())
        # Nothing was logged; there's no need for lots of chatter.
        self.assertThat(logger.output, Equals(""))

    @inlineCallbacks
    def test_sets_ntp_rack_service_to_any_when_is_region(self):
        # Patch the logger in the clusterservice so no log messages are printed
        # because the tests run in debug mode.
        self.patch(common.log, "debug")
        self.useFixture(MAASRootFixture())
        rpc_service, _ = yield prepareRegion(self, is_region=True)
        service, ntp = self.make_RackNTP_ExternalService(rpc_service, reactor)
        self.patch_autospec(ntp, "_configure")  # No-op configuration.

        # There is no most recently applied configuration.
        self.assertThat(ntp._configuration, Is(None))

        with TwistedLoggerFixture() as logger:
            yield service.startService()
            self.addCleanup((yield service.stopService))
            yield service._orig_tryUpdate()

        # Ensure that the service was set to any.
        service = service_monitor.getServiceByName("ntp_rack")
        self.assertEqual(
            (SERVICE_STATE.ANY, "managed by the region"),
            service.getExpectedState(),
        )
        # The most recently applied configuration is set, though it was not
        # actually "applied" because this host was configured as a region+rack
        # controller, and the rack should not attempt to manage the NTP server
        # on a region+rack.
        self.assertThat(
            ntp._configuration, IsInstance(external._NTPConfiguration)
        )
        # The configuration was not applied.
        self.assertThat(ntp._configure, MockNotCalled())
        # Nothing was logged; there's no need for lots of chatter.
        self.assertThat(logger.output, Equals(""))
Example #27
0
class TestServiceMonitorService(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def setUp(self):
        super().setUp()
        # Reset all the toggleable services to off.
        for service in service_monitor._services.values():
            if isinstance(service, ToggleableService):
                service.off()
        self.patch(clusterservice,
                   "get_all_interfaces_definition").return_value = {}

    def pick_service(self):
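        # Helper: pick an arbitrary service registered with the service
        # monitor.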
        return random.choice(list(service_monitor._services.values()))

    def patch_rpc_methods(self):
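        # Fake the region side of the RPC connection, exposing only the
        # `UpdateServices` call. `connecting` appears to fire with a
        # disconnect callable, hence `self.addCleanup((yield connecting))`
        # in the tests below.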
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(region.UpdateServices)
        return protocol, connecting

    def test_init_sets_up_timer_correctly(self):
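        # ServiceMonitorService looks to be built on Twisted's TimerService:
        # `call` is the (function, args, kwargs) tuple invoked every `step`
        # seconds on the given clock.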
        monitor_service = sms.ServiceMonitorService(sentinel.client_service,
                                                    sentinel.clock)
        self.assertThat(
            monitor_service,
            MatchesStructure.byEquality(
                call=(monitor_service.monitorServices, (), {}),
                step=30,
                client_service=sentinel.client_service,
                clock=sentinel.clock,
            ),
        )

    def test_monitorServices_does_not_do_anything_in_dev_environment(self):
        # Belt-and-braces: make sure we really are in a development
        # environment.
        self.assertTrue(sms.is_dev_environment())

        monitor_service = sms.ServiceMonitorService(sentinel.client_service,
                                                    Clock())
        mock_ensureServices = self.patch(service_monitor, "ensureServices")
        with TwistedLoggerFixture() as logger:
            monitor_service.monitorServices()
        self.assertThat(mock_ensureServices, MockNotCalled())
        self.assertDocTestMatches(
            "Skipping check of services; they're not running under the "
            "supervision of systemd.",
            logger.output,
        )

    def test_monitorServices_calls_ensureServices(self):
        # Pretend we're in a production environment.
        self.patch(sms, "is_dev_environment").return_value = False

        monitor_service = sms.ServiceMonitorService(sentinel.client_service,
                                                    Clock())
        mock_client = Mock()
        self.patch(monitor_service,
                   "_getConnection").return_value = succeed(mock_client)
        mock_ensureServices = self.patch(service_monitor, "ensureServices")
        monitor_service.monitorServices()
        self.assertThat(mock_ensureServices, MockCalledOnceWith())

    def test_monitorServices_handles_failure(self):
        # Pretend we're in a production environment.
        self.patch(sms, "is_dev_environment").return_value = False

        monitor_service = sms.ServiceMonitorService(sentinel.client_service,
                                                    Clock())
        mock_getConnection = self.patch(monitor_service, "_getConnection")
        mock_getConnection.return_value = fail(factory.make_exception())
        with TwistedLoggerFixture() as logger:
            monitor_service.monitorServices()
        self.assertDocTestMatches(
            """\
            Failed to monitor services and update region.
            Traceback (most recent call last):
            ...""",
            logger.output,
        )

    @inlineCallbacks
    def test_reports_services_to_region_on_start(self):
        # Pretend we're in a production environment.
        self.patch(sms, "is_dev_environment").return_value = False

        protocol, connecting = self.patch_rpc_methods()
        self.addCleanup((yield connecting))

        class ExampleService(AlwaysOnService):
            name = service_name = snap_service_name = factory.make_name(
                "service")

        service = ExampleService()
        # Inveigle this new service into the service monitor.
        self.addCleanup(service_monitor._services.pop, service.name)
        service_monitor._services[service.name] = service

        state = ServiceState(SERVICE_STATE.ON, "running")
        mock_ensureServices = self.patch(service_monitor, "ensureServices")
        mock_ensureServices.return_value = succeed({service.name: state})

        client = getRegionClient()
        rpc_service = Mock()
        rpc_service.getClientNow.return_value = succeed(client)
        monitor_service = sms.ServiceMonitorService(rpc_service, Clock())
        yield monitor_service.startService()
        yield monitor_service.stopService()

        expected_services = list(monitor_service.ALWAYS_RUNNING_SERVICES)
        expected_services.append({
            "name": service.name,
            "status": "running",
            "status_info": ""
        })
        self.assertThat(
            protocol.UpdateServices,
            MockCalledOnceWith(
                protocol,
                system_id=client.localIdent,
                services=expected_services,
            ),
        )

    @inlineCallbacks
    def test_reports_services_to_region(self):
        # Pretend we're in a production environment.
        self.patch(sms, "is_dev_environment").return_value = False

        protocol, connecting = self.patch_rpc_methods()
        self.addCleanup((yield connecting))

        class ExampleService(AlwaysOnService):
            name = service_name = snap_service_name = factory.make_name(
                "service")

        service = ExampleService()
        # Inveigle this new service into the service monitor.
        self.addCleanup(service_monitor._services.pop, service.name)
        service_monitor._services[service.name] = service

        state = ServiceState(SERVICE_STATE.ON, "running")
        mock_ensureServices = self.patch(service_monitor, "ensureServices")
        mock_ensureServices.return_value = succeed({service.name: state})

        client = getRegionClient()
        rpc_service = Mock()
        rpc_service.getClientNow.return_value = succeed(client)
        monitor_service = sms.ServiceMonitorService(rpc_service, Clock())

        yield monitor_service.startService()
        yield monitor_service.stopService()

        expected_services = list(monitor_service.ALWAYS_RUNNING_SERVICES)
        expected_services.append({
            "name": service.name,
            "status": "running",
            "status_info": ""
        })
        self.assertThat(
            protocol.UpdateServices,
            MockCalledOnceWith(
                protocol,
                system_id=client.localIdent,
                services=expected_services,
            ),
        )

    @inlineCallbacks
    def test_buildServices_includes_always_running_services(self):
        monitor_service = sms.ServiceMonitorService(sentinel.client_service,
                                                    Clock())
        observed_services = yield monitor_service._buildServices({})
        self.assertEqual(monitor_service.ALWAYS_RUNNING_SERVICES,
                         observed_services)

    @inlineCallbacks
    def test_buildServices_adds_services_to_always_running_services(self):
        monitor_service = sms.ServiceMonitorService(sentinel.client_service,
                                                    Clock())
        service = self.pick_service()
        state = ServiceState(SERVICE_STATE.ON, "running")
        observed_services = yield monitor_service._buildServices(
            {service.name: state})
        expected_services = list(monitor_service.ALWAYS_RUNNING_SERVICES)
        expected_services.append({
            "name": service.name,
            "status": "running",
            "status_info": ""
        })
        self.assertEqual(expected_services, observed_services)
Example #28
0
class TestRackDNS(MAASTestCase):
    """Tests for `RackDNS` for `RackExternalService`."""

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5000)

    def setUp(self):
        super().setUp()
        self.patch(
            clusterservice, "get_all_interfaces_definition"
        ).return_value = {}

    def make_trusted_networks(self):
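        # Helper: one IPv4 and one IPv6 address standing in for the trusted
        # networks the region announces to the rack.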
        return frozenset(
            {factory.make_ipv4_address(), factory.make_ipv6_address()}
        )

    def extract_regions(self, rpc_service):
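        # Helper: the set of region IP addresses currently connected, taken
        # from the (host, port) address of each RPC connection.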
        return frozenset(
            {
                client.address[0]
                for _, client in rpc_service.connections.items()
            }
        )

    def make_RackDNS_ExternalService(self, rpc_service, reactor):
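        # Helper: register a fresh `RackDNS` as the only sub-service of a
        # startable `RackExternalService`, mirroring the NTP helper above.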
        dns = external.RackDNS()
        service = make_startable_RackExternalService(
            self, rpc_service, reactor, [("DNS", dns)]
        )
        return service, dns

    @inlineCallbacks
    def test_getConfiguration_returns_configuration_object(self):
        is_region, is_rack = factory.pick_bool(), factory.pick_bool()
        trusted_networks = self.make_trusted_networks()
        rpc_service, protocol = yield prepareRegion(
            self,
            is_region=is_region,
            is_rack=is_rack,
            trusted_networks=trusted_networks,
        )
        region_ips = self.extract_regions(rpc_service)
        service, dns = self.make_RackDNS_ExternalService(rpc_service, reactor)
        yield service.startService()
        self.addCleanup((yield service.stopService))

        config = yield service._getConfiguration()
        observed = dns._getConfiguration(
            config.controller_type,
            config.dns_configuration,
            config.connections,
        )

        self.assertThat(observed, IsInstance(external._DNSConfiguration))
        self.assertThat(
            observed,
            MatchesStructure.byEquality(
                upstream_dns=region_ips,
                trusted_networks=trusted_networks,
                is_region=is_region,
                is_rack=is_rack,
            ),
        )

    @inlineCallbacks
    def test_tryUpdate_updates_dns_server(self):
        self.useFixture(MAASRootFixture())
        trusted_networks = self.make_trusted_networks()
        rpc_service, _ = yield prepareRegion(
            self, trusted_networks=trusted_networks
        )
        region_ips = self.extract_regions(rpc_service)
        service, _ = self.make_RackDNS_ExternalService(rpc_service, reactor)

        mock_ensureService = self.patch_autospec(
            service_monitor, "ensureService"
        )
        mock_ensureService.side_effect = always_succeed_with(None)

        bind_write_options = self.patch_autospec(
            external, "bind_write_options"
        )
        bind_write_configuration = self.patch_autospec(
            external, "bind_write_configuration"
        )
        bind_reload_with_retries = self.patch_autospec(
            external, "bind_reload_with_retries"
        )

        yield service.startService()
        self.addCleanup((yield service.stopService))

        yield service._orig_tryUpdate()

        self.assertThat(
            bind_write_options,
            MockCalledOnceWith(
                upstream_dns=list(sorted(region_ips)), dnssec_validation="no"
            ),
        )
        self.assertThat(
            bind_write_configuration,
            MockCalledOnceWith([], list(sorted(trusted_networks))),
        )
        self.assertThat(mock_ensureService, MockCalledOnceWith("dns_rack"))
        self.assertThat(bind_reload_with_retries, MockCalledOnceWith())
        # If the configuration has not changed then a second call to
        # `_tryUpdate` does not result in another call to `_configure`.
        yield service._orig_tryUpdate()
        self.assertThat(
            bind_write_options,
            MockCalledOnceWith(
                upstream_dns=list(sorted(region_ips)), dnssec_validation="no"
            ),
        )
        self.assertThat(
            bind_write_configuration,
            MockCalledOnceWith([], list(sorted(trusted_networks))),
        )
        self.assertThat(mock_ensureService, MockCalledOnceWith("dns_rack"))
        self.assertThat(bind_reload_with_retries, MockCalledOnceWith())

    @inlineCallbacks
    def test_is_silent_does_nothing_but_saves_config_when_is_region(self):
        # Patch the logger in the clusterservice so no log messages are printed
        # because the tests run in debug mode.
        self.patch(common.log, "debug")
        self.useFixture(MAASRootFixture())
        rpc_service, _ = yield prepareRegion(self, is_region=True)
        service, dns = self.make_RackDNS_ExternalService(rpc_service, reactor)
        self.patch_autospec(dns, "_configure")  # No-op configuration.

        # There is no most recently applied configuration.
        self.assertThat(dns._configuration, Is(None))

        with TwistedLoggerFixture() as logger:
            yield service.startService()
            self.addCleanup((yield service.stopService))
            yield service._orig_tryUpdate()

        # The most recently applied configuration is set, though it was not
        # actually "applied" because this host was configured as a region+rack
        # controller, and the rack should not attempt to manage the DNS server
        # on a region+rack.
        self.assertThat(
            dns._configuration, IsInstance(external._DNSConfiguration)
        )
        # The configuration was not applied.
        self.assertThat(dns._configure, MockNotCalled())
        # Nothing was logged; there's no need for lots of chatter.
        self.assertThat(logger.output, Equals(""))

    @inlineCallbacks
    def test_sets_dns_rack_service_to_any_when_is_region(self):
        # Patch the logger in the clusterservice so no log messages are printed
        # because the tests run in debug mode.
        self.patch(common.log, "debug")
        self.useFixture(MAASRootFixture())
        rpc_service, _ = yield prepareRegion(self, is_region=True)
        service, dns = self.make_RackDNS_ExternalService(rpc_service, reactor)
        self.patch_autospec(dns, "_configure")  # No-op configuration.

        # There is no most recently applied configuration.
        self.assertThat(dns._configuration, Is(None))

        with TwistedLoggerFixture() as logger:
            yield service.startService()
            self.addCleanup((yield service.stopService))
            yield service._orig_tryUpdate()

        # Ensure that the service was set to any.
        service = service_monitor.getServiceByName("dns_rack")
        self.assertEqual(
            (SERVICE_STATE.ANY, "managed by the region"),
            service.getExpectedState(),
        )
        # The most recently applied configuration is set, though it was not
        # actually "applied" because this host was configured as a region+rack
        # controller, and the rack should not attempt to manage the DNS server
        # on a region+rack.
        self.assertThat(
            dns._configuration, IsInstance(external._DNSConfiguration)
        )
        # The configuration was not applied.
        self.assertThat(dns._configure, MockNotCalled())
        # Nothing was logged; there's no need for lots of chatter.
        self.assertThat(logger.output, Equals(""))

    def test_genRegionIps_groups_by_region(self):
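        # Build three regions, each with three event-loop connections on
        # distinct addresses; `_genRegionIps` is expected to yield a single
        # address per region, presumably grouping on the event-loop name
        # before ":pid=".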
        mock_rpc = Mock()
        mock_rpc.connections = {}
        for _ in range(3):
            region_name = factory.make_name("region")
            for _ in range(3):
                pid = random.randint(0, 10000)
                eventloop = "%s:pid=%s" % (region_name, pid)
                ip = factory.make_ip_address()
                mock_conn = Mock()
                mock_conn.address = (ip, random.randint(5240, 5250))
                mock_rpc.connections[eventloop] = mock_conn

        dns = external.RackDNS()
        region_ips = list(dns._genRegionIps(mock_rpc.connections))
        self.assertEqual(3, len(region_ips))

    def test_genRegionIps_always_returns_same_result(self):
        mock_rpc = Mock()
        mock_rpc.connections = {}
        for _ in range(3):
            region_name = factory.make_name("region")
            for _ in range(3):
                pid = random.randint(0, 10000)
                eventloop = "%s:pid=%s" % (region_name, pid)
                ip = factory.make_ip_address()
                mock_conn = Mock()
                mock_conn.address = (ip, random.randint(5240, 5250))
                mock_rpc.connections[eventloop] = mock_conn

        dns = external.RackDNS()
        region_ips = frozenset(dns._genRegionIps(mock_rpc.connections))
        for _ in range(3):
            self.assertEqual(
                region_ips, frozenset(dns._genRegionIps(mock_rpc.connections))
            )
Example #29
0
class TestChangePowerState(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def setUp(self):
        super().setUp()
        self.useFixture(EventTypesAllRegistered())

    @inlineCallbacks
    def patch_rpc_methods(self, return_value={}, side_effect=None):
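        # Fake the region RPC endpoints that the power code calls into; the
        # `MarkNodeFailed` stub is handed back via `returnValue` so tests can
        # assert on it.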
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(
            region.MarkNodeFailed,
            region.UpdateNodePowerState,
            region.SendEvent,
        )
        protocol.MarkNodeFailed.return_value = return_value
        protocol.MarkNodeFailed.side_effect = side_effect
        self.addCleanup((yield connecting))
        returnValue(protocol.MarkNodeFailed)

    def test_change_power_state_calls_power_change_starting_early_on(self):
        # The first, or one of the first, things that change_power_state()
        # does is write to the node event log via power_change_starting().

        class ArbitraryException(Exception):
            """This allows us to return early from a function."""

        # Raise this exception when power_change_starting() is called, to
        # return early from change_power_state(). This lets us avoid set-up
        # for parts of the function that we're presently not interested in.
        pcs = self.patch_autospec(power, "power_change_starting")
        pcs.return_value = fail(ArbitraryException())

        d = power.change_power_state(
            sentinel.system_id,
            sentinel.hostname,
            sentinel.power_type,
            sentinel.power_change,
            sentinel.context,
        )
        self.assertRaises(ArbitraryException, extract_result, d)
        self.assertThat(
            power.power_change_starting,
            MockCalledOnceWith(
                sentinel.system_id, sentinel.hostname, sentinel.power_change
            ),
        )

    @inlineCallbacks
    def test_handles_power_driver_power_types(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_driver = random.choice(
            [driver for _, driver in PowerDriverRegistry if driver.queryable]
        )
        power_change = random.choice(["on", "off"])
        context = {
            factory.make_name("context-key"): factory.make_name("context-val")
        }
        self.patch(power, "is_driver_available").return_value = True
        perform_power_driver_change = self.patch_autospec(
            power, "perform_power_driver_change"
        )
        perform_power_driver_query = self.patch_autospec(
            power, "perform_power_driver_query"
        )
        perform_power_driver_query.return_value = succeed(power_change)
        power_change_success = self.patch_autospec(
            power, "power_change_success"
        )
        yield self.patch_rpc_methods()

        yield power.change_power_state(
            system_id, hostname, power_driver.name, power_change, context
        )

        self.expectThat(
            perform_power_driver_change,
            MockCalledOnceWith(
                system_id, hostname, power_driver.name, power_change, context
            ),
        )
        self.expectThat(
            perform_power_driver_query,
            MockCalledOnceWith(
                system_id, hostname, power_driver.name, context
            ),
        )
        self.expectThat(
            power_change_success,
            MockCalledOnceWith(system_id, hostname, power_change),
        )

    @inlineCallbacks
    def test_return_none_when_unqueryable(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_driver = random.choice(
            [
                driver
                for _, driver in PowerDriverRegistry
                if not driver.queryable
            ]
        )
        power_change = "on"
        context = {
            factory.make_name("context-key"): factory.make_name("context-val")
        }
        self.patch(power, "is_driver_available").return_value = True
        get_item = self.patch(PowerDriverRegistry, "get_item")
        get_item.return_value = MagicMock()
        get_item.return_value.queryable = False
        perform_power_driver_query = self.patch(
            power, "perform_power_driver_query"
        )
        perform_power_driver_query.return_value = succeed(power_change)
        self.patch(power, "power_change_success")
        yield self.patch_rpc_methods()

        result = yield power.change_power_state(
            system_id, hostname, power_driver.name, power_change, context
        )

        self.expectThat(get_item, MockCalledWith(power_driver.name))
        self.expectThat(perform_power_driver_query, MockNotCalled())
        self.expectThat(power.power_change_success, MockNotCalled())
        self.expectThat(result, Equals(None))

    @inlineCallbacks
    def test_calls_power_driver_on_for_power_driver(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_driver = random.choice(
            [driver for _, driver in PowerDriverRegistry if driver.queryable]
        )
        power_change = "on"
        context = {
            factory.make_name("context-key"): factory.make_name("context-val")
        }
        self.patch(power, "is_driver_available").return_value = True
        get_item = self.patch(PowerDriverRegistry, "get_item")
        perform_power_driver_query = self.patch(
            power, "perform_power_driver_query"
        )
        perform_power_driver_query.return_value = succeed(power_change)
        self.patch(power, "power_change_success")
        yield self.patch_rpc_methods()

        result = yield power.change_power_state(
            system_id, hostname, power_driver.name, power_change, context
        )

        self.expectThat(get_item, MockCalledWith(power_driver.name))
        self.expectThat(
            perform_power_driver_query,
            MockCalledOnceWith(
                system_id, hostname, power_driver.name, context
            ),
        )
        self.expectThat(
            power.power_change_success,
            MockCalledOnceWith(system_id, hostname, power_change),
        )
        self.expectThat(result, Equals("on"))

    @inlineCallbacks
    def test_calls_power_driver_off_for_power_driver(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_driver = random.choice(
            [driver for _, driver in PowerDriverRegistry if driver.queryable]
        )
        power_change = "off"
        context = {
            factory.make_name("context-key"): factory.make_name("context-val")
        }
        self.patch(power, "is_driver_available").return_value = True
        get_item = self.patch(PowerDriverRegistry, "get_item")
        perform_power_driver_query = self.patch(
            power, "perform_power_driver_query"
        )
        perform_power_driver_query.return_value = succeed(power_change)
        self.patch(power, "power_change_success")
        yield self.patch_rpc_methods()

        result = yield power.change_power_state(
            system_id, hostname, power_driver.name, power_change, context
        )

        self.expectThat(get_item, MockCalledWith(power_driver.name))
        self.expectThat(
            perform_power_driver_query,
            MockCalledOnceWith(
                system_id, hostname, power_driver.name, context
            ),
        )
        self.expectThat(
            power.power_change_success,
            MockCalledOnceWith(system_id, hostname, power_change),
        )
        self.expectThat(result, Equals("off"))

    @inlineCallbacks
    def test_calls_power_driver_cycle_for_power_driver(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_driver = random.choice(
            [driver for _, driver in PowerDriverRegistry if driver.queryable]
        )
        power_change = "cycle"
        context = {
            factory.make_name("context-key"): factory.make_name("context-val")
        }
        self.patch(power, "is_driver_available").return_value = True
        get_item = self.patch(PowerDriverRegistry, "get_item")
        perform_power_driver_query = self.patch(
            power, "perform_power_driver_query"
        )
        perform_power_driver_query.return_value = succeed("on")
        self.patch(power, "power_change_success")
        yield self.patch_rpc_methods()

        result = yield power.change_power_state(
            system_id, hostname, power_driver.name, power_change, context
        )

        self.expectThat(get_item, MockCalledWith(power_driver.name))
        self.expectThat(
            perform_power_driver_query,
            MockCalledOnceWith(
                system_id, hostname, power_driver.name, context
            ),
        )
        self.expectThat(
            power.power_change_success,
            MockCalledOnceWith(system_id, hostname, "on"),
        )
        self.expectThat(result, Equals("on"))

    @inlineCallbacks
    def test_marks_the_node_broken_if_exception_for_power_driver(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_driver = random.choice(
            [driver for _, driver in PowerDriverRegistry if driver.queryable]
        )
        power_change = "on"
        context = {
            factory.make_name("context-key"): factory.make_name("context-val"),
            "system_id": system_id,
        }
        self.patch(power, "is_driver_available").return_value = True
        exception = PowerError(factory.make_string())
        get_item = self.patch(PowerDriverRegistry, "get_item")
        power_driver = get_item.return_value
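        # NOTE: `power_driver` is rebound here to the mock driver returned by
        # the patched registry, so the randomly chosen driver above is not
        # actually used and `power_driver.name` below is just a Mock
        # attribute.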
        power_driver.on.return_value = fail(exception)

        markNodeBroken = yield self.patch_rpc_methods()

        with ExpectedException(PowerError):
            yield power.change_power_state(
                system_id, hostname, power_driver.name, power_change, context
            )

        error_message = "Power on for the node failed: %s" % (
            get_driver_error_message(exception)
        )
        self.expectThat(
            markNodeBroken,
            MockCalledOnceWith(
                ANY, system_id=system_id, error_description=error_message
            ),
        )
Example #30
0
class TestPowerHelpers(MAASTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def setUp(self):
        super(TestPowerHelpers, self).setUp()
        self.useFixture(EventTypesAllRegistered())

    def patch_rpc_methods(self):
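        # Fake the cluster-to-region RPC. The returned `io` pumps the
        # in-memory transport: calling `io.flush()` in a test delivers queued
        # RPC calls and their responses synchronously.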
        fixture = self.useFixture(MockClusterToRegionRPCFixture())
        protocol, io = fixture.makeEventLoop(
            region.MarkNodeFailed,
            region.UpdateNodePowerState,
            region.SendEvent,
        )
        return protocol, io

    def test_power_state_update_calls_UpdateNodePowerState(self):
        system_id = factory.make_name("system_id")
        state = random.choice(["on", "off"])
        protocol, io = self.patch_rpc_methods()
        d = power.power_state_update(system_id, state)
        # This blocks until the deferred is complete.
        io.flush()
        self.expectThat(extract_result(d), Equals({}))
        self.assertThat(
            protocol.UpdateNodePowerState,
            MockCalledOnceWith(ANY, system_id=system_id, power_state=state),
        )

    def test_power_change_success_emits_event(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_change = "on"
        protocol, io = self.patch_rpc_methods()
        d = power.power_change_success(system_id, hostname, power_change)
        io.flush()
        self.assertThat(
            protocol.UpdateNodePowerState,
            MockCalledOnceWith(
                ANY, system_id=system_id, power_state=power_change
            ),
        )
        self.assertThat(
            protocol.SendEvent,
            MockCalledOnceWith(
                ANY,
                type_name=EVENT_TYPES.NODE_POWERED_ON,
                system_id=system_id,
                description="",
            ),
        )
        self.assertIsNone(extract_result(d))

    def test_power_change_starting_emits_event(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        power_change = "on"
        protocol, io = self.patch_rpc_methods()
        d = power.power_change_starting(system_id, hostname, power_change)
        io.flush()
        self.assertThat(
            protocol.SendEvent,
            MockCalledOnceWith(
                ANY,
                type_name=EVENT_TYPES.NODE_POWER_ON_STARTING,
                system_id=system_id,
                description="",
            ),
        )
        self.assertIsNone(extract_result(d))

    def test_power_change_failure_emits_event(self):
        system_id = factory.make_name("system_id")
        hostname = factory.make_name("hostname")
        message = factory.make_name("message")
        power_change = "on"
        protocol, io = self.patch_rpc_methods()
        d = power.power_change_failure(
            system_id, hostname, power_change, message
        )
        io.flush()
        self.assertThat(
            protocol.SendEvent,
            MockCalledOnceWith(
                ANY,
                type_name=EVENT_TYPES.NODE_POWER_ON_FAILED,
                system_id=system_id,
                description=message,
            ),
        )
        self.assertIsNone(extract_result(d))