Example #1
    def check_allocate_free(self):
        cluster = JsonCluster(
            {"nodes": [
                {"ssh_config": {"host": "localhost1"}},
                {"ssh_config": {"host": "localhost2"}},
                {"ssh_config": {"host": "localhost3"}}]})

        assert len(cluster) == 3
        assert cluster.num_available_nodes() == 3

        nodes = cluster.alloc(Service.setup_cluster_spec(num_nodes=1))
        nodes_hostnames = self.cluster_hostnames(nodes)
        assert len(cluster) == 3
        assert cluster.num_available_nodes() == 2

        nodes2 = cluster.alloc(Service.setup_cluster_spec(num_nodes=2))
        nodes2_hostnames = self.cluster_hostnames(nodes2)
        assert len(cluster) == 3
        assert cluster.num_available_nodes() == 0

        assert nodes_hostnames.isdisjoint(nodes2_hostnames)

        cluster.free(nodes)
        assert cluster.num_available_nodes() == 1

        cluster.free(nodes2)
        assert cluster.num_available_nodes() == 3
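The cluster_hostnames() helper used above is not part of the snippet. Since its result is checked with isdisjoint(), it must return a set; a minimal sketch consistent with that usage might be:

    @staticmethod
    def cluster_hostnames(nodes):
        # Hypothetical helper: collect each allocated node's hostname into a
        # set so the two allocations can be compared for overlap.
        return set(node.account.hostname for node in nodes)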
Example #2
    def check_parsing(self):
        """ Checks that RemoteAccounts are generated correctly from input JSON"""

        node = JsonCluster(
            {
                "nodes": [
                    {"ssh_config": {"host": "hostname"}}]}).alloc(Service.setup_cluster_spec(num_nodes=1))[0]

        assert node.account.hostname == "hostname"
        assert node.account.user is None

        ssh_config = {
            "host": "hostname",
            "user": "******",
            "hostname": "localhost",
            "port": 22
        }
        node = JsonCluster({"nodes": [{"hostname": "hostname",
                                       "user": "******",
                                       "ssh_config": ssh_config}]}).alloc(Service.setup_cluster_spec(num_nodes=1))[0]

        assert node.account.hostname == "hostname"
        assert node.account.user == "user"

        # check ssh configs
        assert node.account.ssh_config.host == "hostname"
        assert node.account.ssh_config.user == "user"
        assert node.account.ssh_config.hostname == "localhost"
        assert node.account.ssh_config.port == 22
Example #3
    def check_with_changing_cluster_availability(self):
        """Modify cluster usage in between calls to next()."""

        scheduler = TestScheduler(self.tc_list, self.cluster)

        # allocate 60 nodes; only test_id 0 should be available
        nodes = self.cluster.alloc(Service.setup_cluster_spec(num_nodes=60))
        assert self.cluster.num_available_nodes() == 40
        t = scheduler.next()
        assert t.test_id == 0
        assert scheduler.peek() is None

        # return 10 nodes, so 50 are available in the cluster
        # next test from the scheduler should be test id 1
        return_nodes = nodes[:10]
        keep_nodes = nodes[10:]
        self.cluster.free(return_nodes)
        assert self.cluster.num_available_nodes() == 50
        t = scheduler.next()
        assert t.test_id == 1
        assert scheduler.peek() is None

        # return remaining nodes, so cluster is fully available
        # next test from scheduler should be test id 2
        return_nodes = keep_nodes
        self.cluster.free(return_nodes)
        assert self.cluster.num_available_nodes() == len(self.cluster)
        t = scheduler.next()
        assert t.test_id == 2
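The scheduler checks depend on fixtures that are not shown: self.cluster and self.tc_list. Judging from the assertions here and the comments in Example #16 (a 100-node cluster; tests needing 10, 50 and 100 nodes), a plausible setup_method() sketch, with make_test_context() as a purely hypothetical factory, is:

    def setup_method(self, _):
        # Assumed fixture, inferred from the assertions: a 100-node cluster
        # and three test contexts that need 10, 50 and 100 nodes respectively.
        self.cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(100)])
        self.tc0 = make_test_context(test_id=0, num_nodes=10)   # hypothetical factory
        self.tc1 = make_test_context(test_id=1, num_nodes=50)
        self.tc2 = make_test_context(test_id=2, num_nodes=100)
        self.tc_list = [self.tc0, self.tc1, self.tc2]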
Example #4
    def check_free_too_many(self):
        n = 10
        cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(n)])
        nodes = cluster.alloc(Service.setup_cluster_spec(num_nodes=n))
        with pytest.raises(NodeNotPresentError):
            nodes.append(MockFiniteSubclusterNode())
            cluster.free(nodes)
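MockFiniteSubclusterNode is not defined in the snippet. Assuming FiniteSubcluster only needs distinct, opaque node objects to hand out and take back, a minimal stand-in might be:

    class MockFiniteSubclusterNode:
        # Hypothetical mock: these checks only need distinct objects whose
        # identity FiniteSubcluster can track.
        pass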
Example #7
    def check_cluster_file_read(self, monkeypatch):
        """check the behavior of VagrantCluster when cluster_file is specified and the file exists.
        VagrantCluster should read cluster information from cluster_file.
        """
        self._set_monkeypatch_attr(monkeypatch)

        # To verify that VagrantCluster reads cluster information from the cluster_file, the
        # content in the file is intentionally made different from that returned by _vagrant_ssh_config().
        nodes_expected = []
        node1_expected = {
            "externally_routable_ip": "127.0.0.3",
            "ssh_config": {
                "host": "worker3",
                "hostname": "127.0.0.3",
                "user": "******",
                "port": 2222,
                "password": "******",
                "identityfile": "/path/to/identfile3"
            }
        }
        nodes_expected.append(node1_expected)

        node2_expected = {
            "externally_routable_ip": "127.0.0.2",
            "ssh_config": {
                "host": "worker2",
                "hostname": "127.0.0.2",
                "user": "******",
                "port": 2223,
                "password": None,
                "identityfile": "/path/to/indentfile2"
            }
        }
        nodes_expected.append(node2_expected)

        cluster_json_expected = {"nodes": nodes_expected}
        # Use a context manager so the file is flushed and closed before
        # VagrantCluster reads it back.
        with open(self.cluster_file, 'w') as f:
            json.dump(cluster_json_expected, f, indent=2, separators=(',', ': '), sort_keys=True)

        # Load the cluster from the json file we just created
        cluster = VagrantCluster(cluster_file=self.cluster_file, is_type_based=False)

        assert len(cluster) == 2
        assert cluster.num_available_nodes() == 2
        node2, node3 = cluster.alloc(Service.setup_cluster_spec(num_nodes=2))

        assert node3.account.hostname == "worker2"
        assert node3.account.user == "vagrant"
        assert node3.account.ssh_hostname == '127.0.0.2'
        assert node3.account.ssh_config.to_json() == node2_expected["ssh_config"]

        assert node2.account.hostname == "worker3"
        assert node2.account.user == "vagrant"
        assert node2.account.ssh_hostname == '127.0.0.3'
        assert node2.account.ssh_config.to_json() == node1_expected["ssh_config"]
Example #9
    def check_allocate_free(self):
        n = 10
        cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(n)])
        assert len(cluster) == n
        assert cluster.num_available_nodes() == n

        nodes = cluster.alloc(Service.setup_cluster_spec(num_nodes=1))
        assert len(nodes) == 1
        assert len(cluster) == n
        assert cluster.num_available_nodes() == n - 1

        nodes2 = cluster.alloc(Service.setup_cluster_spec(num_nodes=2))
        assert len(nodes2) == 2
        assert len(cluster) == n
        assert cluster.num_available_nodes() == n - 3

        cluster.free(nodes)
        assert cluster.num_available_nodes() == n - 2

        cluster.free(nodes2)
        assert cluster.num_available_nodes() == n
Example #10
    def check_parsing(self):
        """ Checks that RemoteAccounts are generated correctly from input JSON"""

        node = JsonCluster(
            cluster_json={"nodes": [{"ssh_config": {"host": "hostname"}}]},
            is_type_based=False).alloc(Service.setup_cluster_spec(num_nodes=1))[0]

        assert node.account.hostname == "hostname"
        assert node.account.user is None

        ssh_config = {
            "host": "hostname",
            "user": "******",
            "hostname": "localhost",
            "port": 22
        }
        node = JsonCluster(
            cluster_json={"nodes": [{"hostname": "hostname",
                                     "user": "user",
                                     "ssh_config": ssh_config}]},
            is_type_based=False).alloc(Service.setup_cluster_spec(num_nodes=1))[0]

        assert node.account.hostname == "hostname"
        assert node.account.user == "user"

        # check ssh configs
        assert node.account.ssh_config.host == "hostname"
        assert node.account.ssh_config.user == "user"
        assert node.account.ssh_config.hostname == "localhost"
        assert node.account.ssh_config.port == 22
Example #11
    def check_allocate_free(self):
        cluster = JsonCluster(
            cluster_json={"nodes": [
                {"ssh_config": {"host": "localhost1"}},
                {"ssh_config": {"host": "localhost2"}},
                {"ssh_config": {"host": "localhost3"}}]},
            is_type_based=False)

        assert len(cluster) == 3
        assert cluster.num_available_nodes() == 3

        nodes = cluster.alloc(Service.setup_cluster_spec(num_nodes=1))
        nodes_hostnames = self.cluster_hostnames(nodes)
        assert len(cluster) == 3
        assert cluster.num_available_nodes() == 2

        nodes2 = cluster.alloc(Service.setup_cluster_spec(num_nodes=2))
        nodes2_hostnames = self.cluster_hostnames(nodes2)
        assert len(cluster) == 3
        assert cluster.num_available_nodes() == 0

        assert nodes_hostnames.isdisjoint(nodes2_hostnames)

        cluster.free(nodes)
        assert cluster.num_available_nodes() == 1

        cluster.free(nodes2)
        assert cluster.num_available_nodes() == 3
Example #12
    def check_non_empty_cluster_too_small(self):
        """Ensure that scheduler does not return tests if the cluster does not have enough available nodes. """

        scheduler = TestScheduler(self.tc_list, self.cluster)
        assert len(scheduler) == len(self.tc_list)
        assert scheduler.peek() is not None

        # alloc all cluster nodes so none are available
        self.cluster.alloc(
            Service.setup_cluster_spec(num_nodes=len(self.cluster)))
        assert self.cluster.num_available_nodes() == 0

        # peeking should not yield an object
        assert scheduler.peek() is None
Example #13
    def check_non_empty_cluster_too_small(self):
        """Ensure that the scheduler does not return tests if the cluster does not have enough available nodes."""

        scheduler = TestScheduler(self.tc_list, self.cluster)
        assert len(scheduler) == len(self.tc_list)
        assert scheduler.peek() is not None

        # alloc all cluster nodes so none are available
        self.cluster.alloc(Service.setup_cluster_spec(num_nodes=len(self.cluster)))
        assert self.cluster.num_available_nodes() == 0

        # neither peeking nor calling next() should yield a test
        assert scheduler.peek() is None
        with pytest.raises(RuntimeError):
            scheduler.next()
Example #14
    def check_one_host_parsing(self, monkeypatch):
        """check the behavior of VagrantCluster when cluster_file is not specified. VagrantCluster should read
        cluster information from _vagrant_ssh_config().
        """
        self._set_monkeypatch_attr(monkeypatch)

        cluster = VagrantCluster(is_type_based=False)
        assert len(cluster) == 2
        assert cluster.num_available_nodes() == 2
        node1, node2 = cluster.alloc(Service.setup_cluster_spec(num_nodes=2))

        assert node1.account.hostname == "worker1"
        assert node1.account.user == "vagrant"
        assert node1.account.ssh_hostname == '127.0.0.1'

        assert node2.account.hostname == "worker2"
        assert node2.account.user == "vagrant"
        assert node2.account.ssh_hostname == '127.0.0.2'
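The _set_monkeypatch_attr() helper used in Examples #7 and #14 is not shown. Its job is evidently to stub out VagrantCluster's calls to vagrant so the checks run without real VMs. A rough sketch, where the patched attribute and the shape of its return value are assumptions rather than the real API:

    def _set_monkeypatch_attr(self, monkeypatch):
        # Hypothetical: have VagrantCluster parse canned `vagrant ssh-config`
        # output (two workers, matching the assertions above) instead of
        # shelling out to vagrant.
        canned_ssh_config = (
            "Host worker1\n  HostName 127.0.0.1\n  User vagrant\n  Port 2222\n"
            "Host worker2\n  HostName 127.0.0.2\n  User vagrant\n  Port 2223\n")
        monkeypatch.setattr(VagrantCluster, "_vagrant_ssh_config",
                            lambda _self: canned_ssh_config)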
Example #15
    def check_request_free(self):
        available = self.cluster.num_available_nodes()
        initial_size = len(self.cluster)

        # Should be able to allocate arbitrarily many nodes
        nodes = self.cluster.alloc(Service.setup_cluster_spec(num_nodes=100))
        assert len(nodes) == 100
        for i, node in enumerate(nodes):
            assert node.account.hostname == 'localhost%d' % i
            assert node.account.ssh_hostname == 'localhost'
            assert node.account.ssh_config.hostname == 'localhost'
            assert node.account.ssh_config.port == 22
            assert node.account.user is None

        assert self.cluster.num_available_nodes() == available - 100
        assert len(self.cluster) == initial_size  # This shouldn't change

        self.cluster.free(nodes)

        assert self.cluster.num_available_nodes() == available
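The fixture here is again assumed: the hostnames localhost0, localhost1, ... and the apparently unbounded node supply suggest ducktape's LocalhostCluster, which fabricates localhost-backed nodes on demand. A plausible setup:

    def setup_method(self, _):
        # Assumed fixture: LocalhostCluster hands out effectively unlimited
        # nodes that all connect to localhost.
        self.cluster = LocalhostCluster()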
Example #16
    def check_with_changing_cluster_availability(self):
        """Modify cluster usage in between calls to next() """

        scheduler = TestScheduler(self.tc_list, self.cluster)

        # start with 100-node cluster (configured in setup_method())
        # allocate 60 nodes; only test_id 0 (which needs 10 nodes) should be available
        nodes = self.cluster.alloc(Service.setup_cluster_spec(num_nodes=60))
        assert self.cluster.num_available_nodes() == 40
        t = scheduler.peek()
        assert t == self.tc0
        scheduler.remove(t)
        assert scheduler.peek() is None

        # return 10 nodes, so 50 are available in the cluster
        # next test from the scheduler should be test id 1 (which needs 50 nodes)
        return_nodes = nodes[:10]
        keep_nodes = nodes[10:]
        self.cluster.free(return_nodes)
        assert self.cluster.num_available_nodes() == 50
        t = scheduler.peek()
        assert t == self.tc1
        scheduler.remove(t)
        assert scheduler.peek() is None

        # return remaining nodes, so cluster is fully available
        # next test from scheduler should be test id 2 (which needs 100 nodes)
        return_nodes = keep_nodes
        self.cluster.free(return_nodes)
        assert self.cluster.num_available_nodes() == len(self.cluster)
        t = scheduler.peek()
        assert t == self.tc2
        scheduler.remove(t)
        # scheduler should become empty now
        assert len(scheduler) == 0
        assert scheduler.peek() is None
Example #18
    def check_exhausts_supply(self):
        cluster = create_json_cluster(self.single_node_cluster_json)
        with pytest.raises(InsufficientResourcesError):
            cluster.alloc(Service.setup_cluster_spec(num_nodes=2))
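Neither create_json_cluster() nor self.single_node_cluster_json appears in the snippet. Examples #20 and #21 build the same cluster directly from JSON, so plausible definitions are:

    single_node_cluster_json = {"nodes": [{"ssh_config": {"host": "localhost"}}]}

    def create_json_cluster(cluster_json):
        # Hypothetical thin wrapper; Examples #20/#21 call JsonCluster directly.
        return JsonCluster(cluster_json)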
Example #19
    def check_alloc_too_many(self):
        n = 10
        cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(n)])
        with pytest.raises(InsufficientResourcesError):
            cluster.alloc(Service.setup_cluster_spec(num_nodes=(n + 1)))
Example #20
    def check_exhausts_supply(self):
        cluster = JsonCluster(cluster_json=self.single_node_cluster_json,
                              is_type_based=False)
        with pytest.raises(InsufficientResourcesError):
            cluster.alloc(Service.setup_cluster_spec(num_nodes=2))
Example #21
    def check_exhausts_supply(self):
        cluster = JsonCluster(self.single_node_cluster_json)
        with pytest.raises(InsufficientResourcesError):
            cluster.alloc(Service.setup_cluster_spec(num_nodes=2))