Example #1
    def check_parsing(self):
        """ Checks that RemoteAccounts are generated correctly from input JSON"""

        node = JsonCluster(
            {
                "nodes": [
                    {"ssh_config": {"host": "hostname"}}]}).alloc(Service.setup_node_spec(num_nodes=1))[0]

        assert node.account.hostname == "hostname"
        assert node.account.user is None

        ssh_config = {
            "host": "hostname",
            "user": "******",
            "hostname": "localhost",
            "port": 22
        }
        node = JsonCluster({"nodes": [{"hostname": "hostname",
                                       "user": "******",
                                       "ssh_config": ssh_config}]}).alloc(Service.setup_node_spec(num_nodes=1))[0]

        assert node.account.hostname == "hostname"
        assert node.account.user == "user"

        # check ssh configs
        assert node.account.ssh_config.host == "hostname"
        assert node.account.ssh_config.user == "user"
        assert node.account.ssh_config.hostname == "localhost"
        assert node.account.ssh_config.port == 22
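
In practice the same cluster definition usually lives in a JSON file rather than an inline dict. A minimal sketch of loading it that way, assuming JsonCluster is importable from ducktape.cluster.json and takes the parsed dict as its argument, as in the check above; the file name is illustrative:

import json

from ducktape.cluster.json import JsonCluster  # import path assumed

# Hypothetical cluster file with the same shape as the inline dict above,
# e.g. {"nodes": [{"ssh_config": {"host": "hostname"}}]}
with open("cluster.json") as f:
    cluster = JsonCluster(json.load(f))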
Example #2
    def check_allocate_free(self):
        cluster = JsonCluster(
            {"nodes": [
                {"ssh_config": {"host": "localhost1"}},
                {"ssh_config": {"host": "localhost2"}},
                {"ssh_config": {"host": "localhost3"}}]})

        assert len(cluster) == 3
        assert cluster.num_available_nodes() == 3

        nodes = cluster.alloc(Service.setup_node_spec(num_nodes=1))
        nodes_hostnames = self.cluster_hostnames(nodes)
        assert len(cluster) == 3
        assert cluster.num_available_nodes() == 2

        nodes2 = cluster.alloc(Service.setup_node_spec(num_nodes=2))
        nodes2_hostnames = self.cluster_hostnames(nodes2)
        assert len(cluster) == 3
        assert cluster.num_available_nodes() == 0

        assert nodes_hostnames.isdisjoint(nodes2_hostnames)

        cluster.free(nodes)
        assert cluster.num_available_nodes() == 1

        cluster.free(nodes2)
        assert cluster.num_available_nodes() == 3
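
cluster_hostnames is a helper defined elsewhere in this test class. A minimal sketch of what it presumably does, returning the set of allocated hostnames so the isdisjoint check above is meaningful (this implementation is an assumption, not the original helper):

    def cluster_hostnames(self, nodes):
        # Hypothetical helper: collect each allocated node's hostname into a set
        # so two allocations can be compared with isdisjoint().
        return set(node.account.hostname for node in nodes)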
Example #3
    def check_with_changing_cluster_availability(self):
        """Modify cluster usage in between calls to next() """

        scheduler = TestScheduler(self.tc_list, self.cluster)

        # allocate 60 nodes; only test_id 0 should be available
        slots = self.cluster.alloc(Service.setup_node_spec(num_nodes=60))
        assert self.cluster.num_available_nodes() == 40
        t = scheduler.next()
        assert t.test_id == 0
        assert scheduler.peek() is None

        # return 10 nodes, so 50 are available in the cluster
        # next test from the scheduler should be test id 1
        return_slots = slots[:10]
        keep_slots = slots[10:]
        self.cluster.free(return_slots)
        assert self.cluster.num_available_nodes() == 50
        t = scheduler.next()
        assert t.test_id == 1
        assert scheduler.peek() is None

        # return remaining nodes, so cluster is fully available
        # next test from scheduler should be test id 2
        return_slots = keep_slots
        self.cluster.free(return_slots)
        assert self.cluster.num_available_nodes() == len(self.cluster)
        t = scheduler.next()
        assert t.test_id == 2
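
The fixtures here are not shown, but the assertions pin down the setup: the cluster holds 100 nodes (allocating 60 leaves 40 available), test 0 needs at most 40 nodes, test 1 more than 40 but at most 50, and test 2 more than 50 but at most 100. A small arithmetic sanity check of that reading; the concrete requirements below are assumptions chosen to satisfy those bounds:

# Assumed per-test node requirements consistent with the assertions above.
assumed_nodes_needed = {0: 40, 1: 50, 2: 100}
cluster_size = 100

assert assumed_nodes_needed[0] <= cluster_size - 60   # schedulable with 60 nodes in use
assert assumed_nodes_needed[1] <= cluster_size - 50   # schedulable once 10 nodes are returned
assert assumed_nodes_needed[2] <= cluster_size        # schedulable only with the full cluster free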
Example #4
    def check_free_too_many(self):
        n = 10
        cluster = FiniteSubcluster(
            [MockFiniteSubclusterNode() for _ in range(n)])
        nodes = cluster.alloc(Service.setup_node_spec(num_nodes=n))
        with pytest.raises(AssertionError):
            nodes.append(object())
            cluster.free(nodes)
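
MockFiniteSubclusterNode is not shown in these examples. A bare-bones stand-in might look like the sketch below; the operating_system property is an assumption (node specs are commonly matched by operating system), not a confirmed requirement of FiniteSubcluster:

class MockFiniteSubclusterNode:
    # Hypothetical stand-in node: FiniteSubcluster only needs objects it can
    # hand out on alloc() and accept back on free().
    @property
    def operating_system(self):
        return "linux"  # assumed value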
Example #5
    def check_cluster_file_read(self, monkeypatch):
        """check the behavior of VagrantCluster when cluster_file is specified and the file exists.
        VagrantCluster should read cluster information from cluster_file.
        """
        self._set_monkeypatch_attr(monkeypatch)

        # To verify that VagrantCluster reads cluster information from the cluster_file, the
        # content in the file is intentionally made different from that returned by _vagrant_ssh_config().
        nodes_expected = []
        node1_expected = {
            "externally_routable_ip": "127.0.0.3",
            "ssh_config": {
                "host": "worker3",
                "hostname": "127.0.0.3",
                "user": "******",
                "port": 2222,
                "password": "******",
                "identityfile": "/path/to/identfile3"
            }
        }
        nodes_expected.append(node1_expected)

        node2_expected = {
            "externally_routable_ip": "127.0.0.2",
            "ssh_config": {
                "host": "worker2",
                "hostname": "127.0.0.2",
                "user": "******",
                "port": 2223,
                "password": None,
                "identityfile": "/path/to/indentfile2"
            }
        }
        nodes_expected.append(node2_expected)

        cluster_json_expected = {"nodes": nodes_expected}
        with open(self.cluster_file, 'w+') as f:
            json.dump(cluster_json_expected, f,
                      indent=2, separators=(',', ': '), sort_keys=True)

        # Load the cluster from the json file we just created
        cluster = VagrantCluster(cluster_file=self.cluster_file)

        assert len(cluster) == 2
        assert cluster.num_available_nodes() == 2
        node2, node3 = cluster.alloc(Service.setup_node_spec(num_nodes=2))

        assert node3.account.hostname == "worker2"
        assert node3.account.user == "vagrant"
        assert node3.account.ssh_hostname == '127.0.0.2'
        assert node3.account.ssh_config.to_json() == node2_expected["ssh_config"]

        assert node2.account.hostname == "worker3"
        assert node2.account.user == "vagrant"
        assert node2.account.ssh_hostname == '127.0.0.3'
        assert node2.account.ssh_config.to_json() == node1_expected["ssh_config"]
Example #6
    def check_allocate_free(self):
        n = 10
        cluster = FiniteSubcluster(
            [MockFiniteSubclusterNode() for _ in range(n)])
        assert len(cluster) == n
        assert cluster.num_available_nodes() == n

        nodes = cluster.alloc(Service.setup_node_spec(num_nodes=1))
        assert len(nodes) == 1
        assert len(cluster) == n
        assert cluster.num_available_nodes() == n - 1

        nodes2 = cluster.alloc(Service.setup_node_spec(num_nodes=2))
        assert len(nodes2) == 2
        assert len(cluster) == n
        assert cluster.num_available_nodes() == n - 3

        cluster.free(nodes)
        assert cluster.num_available_nodes() == n - 2

        cluster.free(nodes2)
        assert cluster.num_available_nodes() == n
Example #7
    def check_non_empty_cluster_too_small(self):
        """Ensure that scheduler does not return tests if the cluster does not have enough available nodes. """

        scheduler = TestScheduler(self.tc_list, self.cluster)
        assert len(scheduler) == len(self.tc_list)
        assert scheduler.peek() is not None

        # alloc all cluster nodes so none are available
        self.cluster.alloc(Service.setup_node_spec(num_nodes=len(self.cluster)))
        assert self.cluster.num_available_nodes() == 0

        # peeking etc should not yield an object
        assert scheduler.peek() is None
        with pytest.raises(RuntimeError):
            scheduler.next()
Example #8
    def check_one_host_parsing(self, monkeypatch):
        """check the behavior of VagrantCluster when cluster_file is not specified. VagrantCluster should read
        cluster information from _vagrant_ssh_config().
        """
        self._set_monkeypatch_attr(monkeypatch)

        cluster = VagrantCluster()
        assert len(cluster) == 2
        assert cluster.num_available_nodes() == 2
        node1, node2 = cluster.alloc(Service.setup_node_spec(num_nodes=2))

        assert node1.account.hostname == "worker1"
        assert node1.account.user == "vagrant"
        assert node1.account.ssh_hostname == '127.0.0.1'

        assert node2.account.hostname == "worker2"
        assert node2.account.user == "vagrant"
        assert node2.account.ssh_hostname == '127.0.0.2'
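
_set_monkeypatch_attr is defined elsewhere in this test class. Per the docstrings above, it stubs VagrantCluster._vagrant_ssh_config() so these checks never shell out to Vagrant. A rough sketch under that assumption; the patch target, return shape, and canned ssh-config text are illustrative rather than the original helper:

    def _set_monkeypatch_attr(self, monkeypatch):
        # Assumed stub: canned text in the format `vagrant ssh-config` prints,
        # covering the two workers the assertions above expect.
        canned_ssh_config = (
            "Host worker1\n"
            "  HostName 127.0.0.1\n"
            "  User vagrant\n"
            "  Port 2222\n"
            "\n"
            "Host worker2\n"
            "  HostName 127.0.0.2\n"
            "  User vagrant\n"
            "  Port 2223\n"
        )
        # Assumed patch target and (output, error) return shape.
        monkeypatch.setattr(
            "ducktape.cluster.vagrant.VagrantCluster._vagrant_ssh_config",
            lambda vc: (canned_ssh_config, None))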
Example #9
    def _preallocate_subcluster(self, test_context):
        """Preallocate the subcluster which will be used to run the test.

        Side effect: store association between the test_id and the preallocated subcluster.

        :param test_context
        :return None
        """
        test_cluster_compare = self.cluster.test_capacity_comparison(
            test_context)
        assert test_cluster_compare >= 0

        if test_cluster_compare == 0 and self.max_parallel > 1:
            self._log(
                logging.WARNING,
                "Test %s is using entire cluster. It's possible this test has no associated cluster metadata."
                % test_context.test_id)

        self._test_cluster[TestKey(test_context.test_id, self.test_counter)] = \
            FiniteSubcluster(self.cluster.alloc(Service.setup_node_spec(node_spec=test_context.expected_node_spec)))
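
The stored subcluster is looked up again under the same key when its test finishes, and the nodes go back to the shared cluster. In short (excerpted from the cleanup path shown in Example #11):

        # On completion, _handle_finished retrieves the subcluster by TestKey
        # and frees its nodes back to self.cluster:
        subcluster = self._test_cluster[test_key]
        self.cluster.free(subcluster.alloc(
            Service.setup_node_spec(node_spec=test_context.expected_node_spec)))
        del self._test_cluster[test_key]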
Example #10
    def check_request_free(self):
        available = self.cluster.num_available_nodes()
        initial_size = len(self.cluster)

        # Should be able to allocate arbitrarily many nodes
        slots = self.cluster.alloc(Service.setup_node_spec(num_nodes=100))
        assert len(slots) == 100
        for i, slot in enumerate(slots):
            assert slot.account.hostname == 'localhost%d' % i
            assert slot.account.ssh_hostname == 'localhost'
            assert slot.account.ssh_config.hostname == 'localhost'
            assert slot.account.ssh_config.port == 22
            assert slot.account.user is None

        assert self.cluster.num_available_nodes() == available - 100
        assert len(self.cluster) == initial_size  # This shouldn't change

        self.cluster.free(slots)

        assert self.cluster.num_available_nodes() == available
Example #11
    def _handle_finished(self, event):
        test_key = TestKey(event["test_id"], event["test_index"])
        self.receiver.send(self.event_response.finished(event))

        result = event['result']
        if result.test_status == FAIL and self.exit_first:
            self.stop_testing = True

        # Transition this test from running to finished
        del self.active_tests[test_key]
        self.finished_tests[test_key] = event
        self.results.append(result)

        # Free nodes used by the test
        subcluster = self._test_cluster[test_key]
        test_context = self._test_context[event["test_id"]]
        self.cluster.free(
            subcluster.alloc(
                Service.setup_node_spec(
                    node_spec=test_context.expected_node_spec)))
        del self._test_cluster[test_key]

        # Join on the finished test process
        self._client_procs[test_key].join()

        # Report partial result summaries - it is helpful to have partial test reports available if the
        # ducktape process is killed with a SIGKILL partway through
        test_results = copy.copy(self.results)  # shallow copy
        reporters = [
            SimpleFileSummaryReporter(test_results),
            HTMLSummaryReporter(test_results),
            JSONReporter(test_results)
        ]
        for r in reporters:
            r.report()

        if self._should_print_separator:
            terminal_width, y = get_terminal_size()
            self._log(logging.INFO, "~" * int(2 * terminal_width / 3))
Example #12
    def check_alloc_too_many(self):
        n = 10
        cluster = FiniteSubcluster(
            [MockFiniteSubclusterNode() for _ in range(n)])
        with pytest.raises(AssertionError):
            cluster.alloc(Service.setup_node_spec(num_nodes=(n + 1)))
Example #13
    def check_exhausts_supply(self):
        cluster = JsonCluster(self.single_node_cluster_json)
        with pytest.raises(RuntimeError):
            cluster.alloc(Service.setup_node_spec(num_nodes=2))
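
self.single_node_cluster_json is a fixture defined elsewhere. Judging by its name and the input shape JsonCluster parses in Example #1, it is presumably something like the following (an illustrative guess, not the original fixture):

# Hypothetical one-node cluster definition in the shape JsonCluster expects.
single_node_cluster_json = {"nodes": [{"ssh_config": {"host": "localhost"}}]}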