def __init__(self, nodes=None):
    """Create the ClusterSpec.

    :param nodes: a collection of NodeSpecs, or None for an empty spec.
    """
    self.nodes = NodeContainer(nodes)
class ShrinkingLocalhostCluster(LocalhostCluster):
    """A LocalhostCluster that drops one allocated node on a chosen do_alloc call.

    Used to simulate a cluster shrinking underneath the scheduler.
    """

    def __init__(self, *args, shrink_on=1, **kwargs):
        super().__init__(*args, **kwargs)
        self.bad_nodes = NodeContainer()
        # which call to shrink on
        self.shrink_on = shrink_on
        self.num_alloc_calls = 0

    def do_alloc(self, cluster_spec):
        allocated = super().do_alloc(cluster_spec)
        self.num_alloc_calls += 1
        if self.num_alloc_calls == self.shrink_on:
            # Pull one node out of the allocation and quarantine it.
            bad_node = allocated.pop()
            self._in_use_nodes.remove_node(bad_node)
            self.bad_nodes.add_node(bad_node)
            # simplified logic, we know all nodes are of the same OS/type
            # check if we don't have enough nodes any more
            # (which really should be true every time, since the largest test would be scheduled)
            if len(allocated) < len(cluster_spec):
                # return all good nodes back to be available
                for node in allocated:
                    self._in_use_nodes.remove_node(node)
                    self._available_nodes.add_node(node)
                raise InsufficientResourcesError("yeah")
        return allocated
def __init__(self, *args, **kwargs):
    """Build the pool of localhost-backed nodes.

    Recognized kwarg: num_nodes (default 1000) — size of the available pool.
    """
    count = kwargs.get("num_nodes", 1000)
    self._available_nodes = NodeContainer()
    for idx in range(count):
        config = RemoteAccountSSHConfig("localhost%d" % idx, hostname="localhost", port=22)
        self._available_nodes.add_node(ClusterNode(LinuxRemoteAccount(config)))
    self._in_use_nodes = NodeContainer()
def __init__(self, *args, **kwargs):
    """Initialize the base Cluster, then build the localhost node pool.

    Recognized kwarg: num_nodes (default 1000) — size of the available pool.
    """
    super(LocalhostCluster, self).__init__()
    pool_size = kwargs.get("num_nodes", 1000)
    self._available_nodes = NodeContainer()
    for n in range(pool_size):
        cfg = RemoteAccountSSHConfig("localhost%d" % n, hostname="localhost", port=22)
        self._available_nodes.add_node(ClusterNode(LinuxRemoteAccount(cfg)))
    self._in_use_nodes = NodeContainer()
def check_sizes(self):
    """size() and len() agree for empty and single-node containers."""
    no_nodes = NodeContainer()
    assert no_nodes.size() == 0
    assert len(no_nodes) == 0
    single = NodeContainer([ClusterNode(MockAccount())])
    assert single.size() == 1
    assert len(single) == 1
class LocalhostCluster(Cluster):
    """A "cluster" that runs entirely on localhost using default credentials.

    This doesn't require any user configuration and is equivalent to the old
    defaults in cluster_config.json. There are no constraints on the resources
    available.
    """

    def __init__(self, *args, **kwargs):
        super(LocalhostCluster, self).__init__()
        self._available_nodes = NodeContainer()
        for i in range(kwargs.get("num_nodes", 1000)):
            cfg = RemoteAccountSSHConfig("localhost%d" % i, hostname="localhost", port=22)
            self._available_nodes.add_node(ClusterNode(LinuxRemoteAccount(cfg)))
        self._in_use_nodes = NodeContainer()

    def do_alloc(self, cluster_spec):
        # Move matching nodes from the available pool into the in-use pool.
        taken = self._available_nodes.remove_spec(cluster_spec)
        self._in_use_nodes.add_nodes(taken)
        return taken

    def free_single(self, node):
        self._in_use_nodes.remove_node(node)
        self._available_nodes.add_node(node)
        node.account.close()

    def available(self):
        return ClusterSpec.from_nodes(self._available_nodes)

    def used(self):
        return ClusterSpec.from_nodes(self._in_use_nodes)
class LocalhostCluster(Cluster):
    """A "cluster" that runs entirely on localhost using default credentials.

    This doesn't require any user configuration and is equivalent to the old
    defaults in cluster_config.json. There are no constraints on the resources
    available.
    """

    def __init__(self, *args, **kwargs):
        total = kwargs.get("num_nodes", 1000)
        self._available_nodes = NodeContainer()
        for idx in range(total):
            cfg = RemoteAccountSSHConfig("localhost%d" % idx, hostname="localhost", port=22)
            self._available_nodes.add_node(ClusterNode(LinuxRemoteAccount(cfg)))
        self._in_use_nodes = NodeContainer()

    def alloc(self, cluster_spec):
        # Transfer matching nodes from available to in-use.
        granted = self._available_nodes.remove_spec(cluster_spec)
        self._in_use_nodes.add_nodes(granted)
        return granted

    def free_single(self, node):
        self._in_use_nodes.remove_node(node)
        self._available_nodes.add_node(node)
        node.account.close()

    def available(self):
        return ClusterSpec.from_nodes(self._available_nodes)

    def used(self):
        return ClusterSpec.from_nodes(self._in_use_nodes)
def __init__(self, *args, **kwargs):
    """Populate the localhost node pool.

    Recognized kwargs: num_nodes (default 1000) and is_type_based
    (default True), the latter forwarded to each LinuxRemoteAccount.
    """
    total = kwargs.get("num_nodes", 1000)
    type_based = kwargs.get("is_type_based", True)
    self._available_nodes = NodeContainer()
    for idx in range(total):
        cfg = RemoteAccountSSHConfig("localhost%d" % idx, hostname="localhost", port=22)
        account = LinuxRemoteAccount(ssh_config=cfg, is_type_based=type_based)
        self._available_nodes.add_node(ClusterNode(account))
    self._in_use_nodes = NodeContainer()
class FiniteSubcluster(Cluster):
    """Gives us a mechanism for allocating finite blocks of nodes from another cluster."""

    def __init__(self, nodes):
        super(FiniteSubcluster, self).__init__()
        self.nodes = nodes
        self._available_nodes = NodeContainer(nodes)
        self._in_use_nodes = NodeContainer()

    def do_alloc(self, cluster_spec):
        # There cannot be any bad nodes here, since FiniteSubcluster operates on
        # ClusterNode objects, which are not checked for health by
        # NodeContainer.remove_spec. However there could be an error, specifically
        # if a test decides to alloc more nodes than are available. In a previous
        # ducktape version this exception was raised by remove_spec; in this one,
        # for consistency, we let the cluster itself deal with allocation errors.
        good_nodes, bad_nodes = self._available_nodes.remove_spec(cluster_spec)
        self._in_use_nodes.add_nodes(good_nodes)
        return good_nodes

    def free_single(self, node):
        self._in_use_nodes.remove_node(node)
        self._available_nodes.add_node(node)

    def available(self):
        return ClusterSpec.from_nodes(self._available_nodes)

    def used(self):
        return ClusterSpec.from_nodes(self._in_use_nodes)
def check_add_and_remove(self):
    """add_node/remove_node/clone and iteration behave as expected."""
    nodes = [ClusterNode(MockAccount()) for _ in range(5)]
    container = NodeContainer([])
    assert len(container) == 0
    for node in nodes[:3]:
        container.add_node(node)
    container2 = container.clone()
    # iteration yields nodes in insertion order
    for expected, actual in zip(nodes, container):
        assert expected == actual
    assert len(container) == 3
    container.remove_node(nodes[0])
    # a second removal of the same node must raise
    with pytest.raises(NodeNotPresentError):
        container.remove_node(nodes[0])
    assert len(container) == 2
    assert len(container2) == 3
class FakeCluster(Cluster):
    """A cluster class with counters, but no actual node objects."""

    def __init__(self, num_nodes):
        self._available_nodes = NodeContainer()
        for _ in range(num_nodes):
            self._available_nodes.add_node(FakeClusterNode())
        self._in_use_nodes = NodeContainer()

    def alloc(self, cluster_spec):
        # Move matching fake nodes into the in-use pool.
        granted = self._available_nodes.remove_spec(cluster_spec)
        self._in_use_nodes.add_nodes(granted)
        return granted

    def free_single(self, node):
        self._in_use_nodes.remove_node(node)
        self._available_nodes.add_node(node)

    def available(self):
        return ClusterSpec.from_nodes(self._available_nodes)

    def used(self):
        return ClusterSpec.from_nodes(self._in_use_nodes)
class FiniteSubcluster(Cluster):
    """Gives us a mechanism for allocating finite blocks of nodes from another cluster."""

    def __init__(self, nodes):
        self.nodes = nodes
        self._available_nodes = NodeContainer(nodes)
        self._in_use_nodes = NodeContainer()

    def alloc(self, cluster_spec):
        granted = self._available_nodes.remove_spec(cluster_spec)
        self._in_use_nodes.add_nodes(granted)
        return granted

    def free_single(self, node):
        self._in_use_nodes.remove_node(node)
        self._available_nodes.add_node(node)

    def available(self):
        return ClusterSpec.from_nodes(self._available_nodes)

    def used(self):
        return ClusterSpec.from_nodes(self._in_use_nodes)
def check_add_and_remove(self):
    """Exercise add_node, clone, iteration order, and double-removal error."""
    nodes = [ClusterNode(MockAccount()) for _ in range(5)]
    container = NodeContainer([])
    assert len(container) == 0
    container.add_node(nodes[0])
    container.add_node(nodes[1])
    container.add_node(nodes[2])
    snapshot = container.clone()
    for idx, node in enumerate(container):
        assert nodes[idx] == node
    assert len(container) == 3
    container.remove_node(nodes[0])
    # removing the same node twice must raise
    with pytest.raises(NodeNotPresentError):
        container.remove_node(nodes[0])
    assert len(container) == 2
    assert len(snapshot) == 3
class ClusterSpec(object):
    """The specification for a ducktape cluster: a collection of NodeSpecs."""

    @staticmethod
    def empty():
        """Return a ClusterSpec with no nodes."""
        return ClusterSpec([])

    @staticmethod
    def simple_linux(num_nodes):
        """Create a ClusterSpec containing some simple Linux nodes.

        :param num_nodes: how many Linux NodeSpecs the spec should contain.
        """
        node_specs = [NodeSpec(LINUX)] * num_nodes
        return ClusterSpec(node_specs)

    @staticmethod
    def from_nodes(nodes):
        """Create a ClusterSpec describing a list of nodes."""
        # Fix: the NodeSpec list was previously wrapped in ClusterSpec twice
        # (ClusterSpec(ClusterSpec([...]))), handing a ClusterSpec instead of a
        # collection of NodeSpecs to __init__.
        return ClusterSpec([NodeSpec(node.operating_system) for node in nodes])

    def __init__(self, nodes=None):
        """Initialize the ClusterSpec.

        :param nodes: A collection of NodeSpecs, or None to create an empty
                      cluster spec.
        """
        self.nodes = NodeContainer(nodes)

    def __len__(self):
        return self.size()

    def __iter__(self):
        return self.nodes.elements()

    def size(self):
        """Return the total size of this cluster spec, including all types of nodes."""
        return self.nodes.size()

    def add(self, other):
        """Add another ClusterSpec to this one.

        :param other: The other cluster spec. This will not be modified.
        :return: This ClusterSpec.
        """
        for node_spec in other.nodes:
            self.nodes.add_node(node_spec)
        return self

    def clone(self):
        """Return a deep copy of this object."""
        return ClusterSpec(self.nodes.clone())

    def __str__(self):
        # Group identical node specs and render each with its multiplicity.
        node_spec_to_num = {}
        for node_spec in self.nodes.elements():
            node_spec_str = str(node_spec)
            node_spec_to_num[node_spec_str] = node_spec_to_num.get(node_spec_str, 0) + 1
        rval = []
        for node_spec_str in sorted(node_spec_to_num.keys()):
            node_spec = json.loads(node_spec_str)
            node_spec["num_nodes"] = node_spec_to_num[node_spec_str]
            rval.append(node_spec)
        return json.dumps(rval, sort_keys=True)
def __init__(self, num_nodes):
    """Create num_nodes fake nodes, all initially available."""
    self._available_nodes = NodeContainer()
    for _ in range(num_nodes):
        self._available_nodes.add_node(FakeClusterNode())
    self._in_use_nodes = NodeContainer()
def __init__(self, nodes):
    """:param nodes: the fixed pool of nodes this subcluster may hand out."""
    self.nodes = nodes
    self._available_nodes = NodeContainer(nodes)
    self._in_use_nodes = NodeContainer()
class ClusterSpec(object):
    """The specification for a ducktape cluster: a collection of NodeSpecs,
    optionally annotated with machine-type (cpu/mem/disk) requirements."""

    @staticmethod
    def empty():
        """Return a ClusterSpec with no nodes."""
        return ClusterSpec([])

    @staticmethod
    def simple_linux(num_nodes):
        """Create a ClusterSpec containing some simple Linux nodes."""
        node_specs_dict = {'os': LINUX, 'num_nodes': num_nodes}
        return ClusterSpec.from_dict(node_specs_dict)

    @staticmethod
    def from_dict(node_specs_dict):
        """Create ClusterSpec from a dict of node specifics.

        Operating system defaults to 'linux'. Number of nodes defaults to 1.
        e.g. {'os':'linux', 'cpu':2, 'mem':'4GB', 'disk':'30GB',
              'additional_disks':{'/dev/sdb':'100GB'}}

        :param node_specs_dict: The dictionary of node specifics
        :return: ClusterSpec
        """
        os = node_specs_dict.get('os', LINUX)
        cpu_core = node_specs_dict.get('cpu')
        mem_size = node_specs_dict.get('mem')
        disk_size = node_specs_dict.get('disk')
        addl_disks = node_specs_dict.get('additional_disks', {})
        # Normalize each additional-disk size string to gigabytes.
        addl_disks_gb = {
            d: ClusterSpec.to_gigabyte(d_size)
            for d, d_size in addl_disks.items()
        }
        num_nodes = node_specs_dict.get('num_nodes', 1)
        # Every node in the spec shares the same OS and MachineType.
        return ClusterSpec([
            NodeSpec(
                os,
                MachineType(cpu_core, ClusterSpec.to_gigabyte(mem_size),
                            ClusterSpec.to_gigabyte(disk_size), addl_disks_gb))
            for _ in range(num_nodes)
        ])

    @staticmethod
    def from_list(node_specs_dict_list):
        """Create a ClusterSpec from a list of node specifics dictionaries.

        e.g. [{'cpu':1, 'mem':'500MB', 'disk':'10GB'},
              {'cpu':2, 'mem':'4GB', 'disk':'30GB', 'num_nodes':2}]

        :param node_specs_dict_list: The list of node specifics dictionaries
        :return: ClusterSpec
        """
        node_specs = []
        for node_specs_dict in node_specs_dict_list:
            cluster_spec = ClusterSpec.from_dict(node_specs_dict)
            node_specs += cluster_spec.nodes
        return ClusterSpec.from_nodes(node_specs)

    @staticmethod
    def to_gigabyte(size):
        """Return number of gigabytes parsed from a size string.

        :param size: The string representation of size in format of
                     <number+[TB|T|GB|G|MB|M|KB|K]>, or None (returned as-is)
        :return: number of gigabytes (float), or None if size is None
        """
        if size is None:
            return size
        else:
            # Byte multipliers for each accepted (case-insensitive) unit suffix.
            unit_definitions = {
                'kb': 1024,
                'k': 1024,
                'mb': 1024**2,
                'm': 1024**2,
                'gb': 1024**3,
                'g': 1024**3,
                'tb': 1024**4,
                't': 1024**4
            }
            m = re.match(r"(\d*\.?\d+|\d+)\s*(\w+)", size.lower(), re.I)
            number = m.group(1)
            unit = m.group(2)
            num_bytes = float(number) * unit_definitions[unit]
            return num_bytes / unit_definitions['gb']

    @staticmethod
    def from_nodes(nodes):
        """Create a ClusterSpec describing a list of nodes."""
        return ClusterSpec([
            NodeSpec(node.operating_system, node.machine_type)
            for node in nodes
        ])

    def __init__(self, nodes=None):
        """Initialize the ClusterSpec.

        :param nodes: A collection of NodeSpecs, or None to create an empty
                      cluster spec.
        """
        self.nodes = NodeContainer(nodes)

    def __len__(self):
        return self.size()

    def __iter__(self):
        return self.nodes.elements()

    def size(self):
        """Return the total size of this cluster spec, including all types of nodes."""
        return self.nodes.size()

    def add(self, other):
        """Add another ClusterSpec to this one.

        :param other: The other cluster spec. This will not be modified.
        :return: This ClusterSpec.
        """
        for node_spec in other.nodes:
            self.nodes.add_node(node_spec)
        return self

    def clone(self):
        """Return a deep copy of this object."""
        return ClusterSpec(self.nodes.clone())

    def __str__(self):
        # Group identical node specs and render each with its multiplicity.
        node_spec_to_num = {}
        for node_spec in self.nodes.elements():
            node_spec_str = str(node_spec)
            node_spec_to_num[node_spec_str] = node_spec_to_num.get(
                node_spec_str, 0) + 1
        rval = []
        for node_spec_str in sorted(node_spec_to_num.keys()):
            node_spec = json.loads(node_spec_str)
            node_spec["num_nodes"] = node_spec_to_num[node_spec_str]
            rval.append(node_spec)
        return json.dumps(rval, sort_keys=True)
def __init__(self, cluster_json=None, *args, **kwargs):
    """Initialize JsonCluster

    JsonCluster can be initialized from:

        - a json-serializeable dict
        - a "cluster_file" containing json

    :param cluster_json: a json-serializeable dict containing node information.
        If ``cluster_json`` is None, load from file
    :param cluster_file (optional): Overrides the default location of the json
        cluster file

    Example json with a local Vagrant cluster::

        {
          "nodes": [
            {
              "externally_routable_ip": "192.168.50.151",
              "ssh_config": {
                "host": "worker1",
                "hostname": "127.0.0.1",
                "identityfile": "/path/to/private_key",
                "password": null,
                "port": 2222,
                "user": "******"
              }
            },
            {
              "externally_routable_ip": "192.168.50.151",
              "ssh_config": {
                "host": "worker2",
                "hostname": "127.0.0.1",
                "identityfile": "/path/to/private_key",
                "password": null,
                "port": 2223,
                "user": "******"
              }
            }
          ]
        }
    """
    super(JsonCluster, self).__init__()
    self._available_accounts = NodeContainer()
    self._in_use_nodes = NodeContainer()
    if cluster_json is None:
        # This is a direct instantiation of JsonCluster rather than from a
        # subclass (e.g. VagrantCluster): load the node list from the cluster file.
        cluster_file = kwargs.get("cluster_file")
        if cluster_file is None:
            cluster_file = ConsoleDefaults.CLUSTER_FILE
        cluster_json = json.load(open(os.path.abspath(cluster_file)))
    try:
        for ninfo in cluster_json["nodes"]:
            ssh_config_dict = ninfo.get("ssh_config")
            assert ssh_config_dict is not None, \
                "Cluster json has a node without a ssh_config field: %s\n Cluster json: %s" % (ninfo, cluster_json)
            ssh_config = RemoteAccountSSHConfig(**ninfo.get("ssh_config", {}))
            remote_account = JsonCluster.make_remote_account(ssh_config, ninfo.get("externally_routable_ip"))
            # Fall back to a computed routable IP when none was given in the json.
            if remote_account.externally_routable_ip is None:
                remote_account.externally_routable_ip = self._externally_routable_ip(remote_account)
            self._available_accounts.add_node(remote_account)
    except BaseException as e:
        # Any malformed entry invalidates the whole cluster definition.
        msg = "JSON cluster definition invalid: %s: %s" % (e, traceback.format_exc(limit=16))
        raise ValueError(msg)
    # Monotonic counter used to assign slot ids to allocated nodes.
    self._id_supplier = 0
class JsonCluster(Cluster):
    """An implementation of Cluster that uses static settings specified in a
    cluster file or json-serializeable dict
    """

    def __init__(self, cluster_json=None, *args, **kwargs):
        """Initialize JsonCluster

        JsonCluster can be initialized from:

            - a json-serializeable dict
            - a "cluster_file" containing json

        :param cluster_json: a json-serializeable dict containing node
            information. If ``cluster_json`` is None, load from file
        :param cluster_file (optional): Overrides the default location of the
            json cluster file

        Example json with a local Vagrant cluster::

            {
              "nodes": [
                {
                  "externally_routable_ip": "192.168.50.151",
                  "ssh_config": {
                    "host": "worker1",
                    "hostname": "127.0.0.1",
                    "identityfile": "/path/to/private_key",
                    "password": null,
                    "port": 2222,
                    "user": "******"
                  }
                },
                {
                  "externally_routable_ip": "192.168.50.151",
                  "ssh_config": {
                    "host": "worker2",
                    "hostname": "127.0.0.1",
                    "identityfile": "/path/to/private_key",
                    "password": null,
                    "port": 2223,
                    "user": "******"
                  }
                }
              ]
            }
        """
        super(JsonCluster, self).__init__()
        self._available_accounts = NodeContainer()
        self._in_use_nodes = NodeContainer()
        if cluster_json is None:
            # This is a direct instantiation of JsonCluster rather than from a
            # subclass (e.g. VagrantCluster): load node info from the cluster file.
            cluster_file = kwargs.get("cluster_file")
            if cluster_file is None:
                cluster_file = ConsoleDefaults.CLUSTER_FILE
            cluster_json = json.load(open(os.path.abspath(cluster_file)))
        try:
            for ninfo in cluster_json["nodes"]:
                ssh_config_dict = ninfo.get("ssh_config")
                assert ssh_config_dict is not None, \
                    "Cluster json has a node without a ssh_config field: %s\n Cluster json: %s" % (ninfo, cluster_json)
                ssh_config = RemoteAccountSSHConfig(
                    **ninfo.get("ssh_config", {}))
                remote_account = JsonCluster.make_remote_account(
                    ssh_config, ninfo.get("externally_routable_ip"))
                # Fall back to a computed routable IP when none was supplied.
                if remote_account.externally_routable_ip is None:
                    remote_account.externally_routable_ip = self._externally_routable_ip(
                        remote_account)
                self._available_accounts.add_node(remote_account)
        except BaseException as e:
            # Any malformed entry invalidates the whole cluster definition.
            msg = "JSON cluster definition invalid: %s: %s" % (
                e, traceback.format_exc(limit=16))
            raise ValueError(msg)
        # Monotonic counter used to assign slot ids to allocated nodes.
        self._id_supplier = 0

    @staticmethod
    def make_remote_account(ssh_config, externally_routable_ip=None):
        """Factory function for creating the correct RemoteAccount implementation."""
        # Hosts whose name contains the WINDOWS marker get a Windows account.
        if ssh_config.host and WINDOWS in ssh_config.host:
            return WindowsRemoteAccount(
                ssh_config=ssh_config,
                externally_routable_ip=externally_routable_ip)
        else:
            return LinuxRemoteAccount(
                ssh_config=ssh_config,
                externally_routable_ip=externally_routable_ip)

    def do_alloc(self, cluster_spec):
        """Allocate accounts matching cluster_spec, wrapping each in a ClusterNode
        with a fresh slot id."""
        allocated_accounts = self._available_accounts.remove_spec(cluster_spec)
        allocated_nodes = []
        for account in allocated_accounts:
            allocated_nodes.append(
                ClusterNode(account, slot_id=self._id_supplier))
            self._id_supplier += 1
        self._in_use_nodes.add_nodes(allocated_nodes)
        return allocated_nodes

    def free_single(self, node):
        """Return a single node's account to the available pool and close it."""
        self._in_use_nodes.remove_node(node)
        self._available_accounts.add_node(node.account)
        node.account.close()

    def _externally_routable_ip(self, account):
        # Subclasses may override to compute a routable IP for an account.
        return None

    def available(self):
        return ClusterSpec.from_nodes(self._available_accounts)

    def used(self):
        return ClusterSpec.from_nodes(self._in_use_nodes)
class ClusterSpec(object):
    """The specification for a ducktape cluster: a collection of NodeSpecs."""

    # Container of NodeSpecs; populated by __init__.
    nodes: NodeContainer = None

    @staticmethod
    def empty():
        """Return a ClusterSpec with no nodes."""
        return ClusterSpec([])

    @staticmethod
    def simple_linux(num_nodes):
        """Create a ClusterSpec containing some simple Linux nodes.

        :param num_nodes: how many Linux NodeSpecs the spec should contain.
        """
        node_specs = [NodeSpec(LINUX)] * num_nodes
        return ClusterSpec(node_specs)

    @staticmethod
    def from_nodes(nodes):
        """Create a ClusterSpec describing a list of nodes."""
        # Fix: the NodeSpec list was previously wrapped in ClusterSpec twice
        # (ClusterSpec(ClusterSpec([...]))), handing a ClusterSpec instead of a
        # collection of NodeSpecs to __init__.
        return ClusterSpec([NodeSpec(node.operating_system) for node in nodes])

    def __init__(self, nodes=None):
        """Initialize the ClusterSpec.

        :param nodes: A collection of NodeSpecs, or None to create an empty
                      cluster spec.
        """
        self.nodes = NodeContainer(nodes)

    def __len__(self):
        return self.size()

    def __iter__(self):
        return self.nodes.elements()

    def size(self):
        """Return the total size of this cluster spec, including all types of nodes."""
        return self.nodes.size()

    def add(self, other):
        """Add another ClusterSpec to this one.

        :param other: The other cluster spec. This will not be modified.
        :return: This ClusterSpec.
        """
        for node_spec in other.nodes:
            self.nodes.add_node(node_spec)
        return self

    def clone(self):
        """Return a deep copy of this object."""
        return ClusterSpec(self.nodes.clone())

    def __str__(self):
        # Group identical node specs and render each with its multiplicity.
        node_spec_to_num = {}
        for node_spec in self.nodes.elements():
            node_spec_str = str(node_spec)
            node_spec_to_num[node_spec_str] = node_spec_to_num.get(node_spec_str, 0) + 1
        rval = []
        for node_spec_str in sorted(node_spec_to_num.keys()):
            node_spec = json.loads(node_spec_str)
            node_spec["num_nodes"] = node_spec_to_num[node_spec_str]
            rval.append(node_spec)
        return json.dumps(rval, sort_keys=True)
def __init__(self, *args, shrink_on=1, **kwargs):
    """:param shrink_on: index (1-based) of the alloc call on which to drop a node."""
    super().__init__(*args, **kwargs)
    self.bad_nodes = NodeContainer()
    self.shrink_on = shrink_on  # which call to shrink on
    self.num_alloc_calls = 0
class JsonCluster(Cluster):
    """An implementation of Cluster that uses static settings specified in a
    cluster file or json-serializeable dict
    """

    def __init__(self, cluster_json=None, *args, **kwargs):
        """Initialize JsonCluster

        JsonCluster can be initialized from:

            - a json-serializeable dict
            - a "cluster_file" containing json

        :param cluster_json: a json-serializeable dict containing node
            information. If ``cluster_json`` is None, load from file
        :param cluster_file (optional): Overrides the default location of the
            json cluster file

        Example json with a local Vagrant cluster::

            {
              "nodes": [
                {
                  "externally_routable_ip": "192.168.50.151",
                  "ssh_config": {
                    "host": "worker1",
                    "hostname": "127.0.0.1",
                    "identityfile": "/path/to/private_key",
                    "password": null,
                    "port": 2222,
                    "user": "******"
                  }
                },
                {
                  "externally_routable_ip": "192.168.50.151",
                  "ssh_config": {
                    "host": "worker2",
                    "hostname": "127.0.0.1",
                    "identityfile": "/path/to/private_key",
                    "password": null,
                    "port": 2223,
                    "user": "******"
                  }
                }
              ]
            }
        """
        super(JsonCluster, self).__init__()
        self._available_accounts = NodeContainer()
        self._in_use_nodes = NodeContainer()
        if cluster_json is None:
            # This is a direct instantiation of JsonCluster rather than from a
            # subclass (e.g. VagrantCluster): load node info from the cluster file.
            cluster_file = kwargs.get("cluster_file")
            if cluster_file is None:
                cluster_file = ConsoleDefaults.CLUSTER_FILE
            cluster_json = json.load(open(os.path.abspath(cluster_file)))
        try:
            for ninfo in cluster_json["nodes"]:
                ssh_config_dict = ninfo.get("ssh_config")
                assert ssh_config_dict is not None, \
                    "Cluster json has a node without a ssh_config field: %s\n Cluster json: %s" % (ninfo, cluster_json)
                ssh_config = RemoteAccountSSHConfig(**ninfo.get("ssh_config", {}))
                remote_account = JsonCluster.make_remote_account(ssh_config, ninfo.get("externally_routable_ip"))
                # Fall back to a computed routable IP when none was supplied.
                if remote_account.externally_routable_ip is None:
                    remote_account.externally_routable_ip = self._externally_routable_ip(remote_account)
                self._available_accounts.add_node(remote_account)
        except BaseException as e:
            # Any malformed entry invalidates the whole cluster definition.
            msg = "JSON cluster definition invalid: %s: %s" % (e, traceback.format_exc(limit=16))
            raise ValueError(msg)
        # Monotonic counter used to assign slot ids to allocated nodes.
        self._id_supplier = 0

    @staticmethod
    def make_remote_account(ssh_config, externally_routable_ip=None):
        """Factory function for creating the correct RemoteAccount implementation."""
        # Hosts whose name contains the WINDOWS marker get a Windows account.
        if ssh_config.host and WINDOWS in ssh_config.host:
            return WindowsRemoteAccount(ssh_config=ssh_config, externally_routable_ip=externally_routable_ip)
        else:
            return LinuxRemoteAccount(ssh_config=ssh_config, externally_routable_ip=externally_routable_ip)

    def alloc(self, cluster_spec):
        """Allocate accounts matching cluster_spec, wrapping each in a ClusterNode
        with a fresh slot id."""
        allocated_accounts = self._available_accounts.remove_spec(cluster_spec)
        allocated_nodes = []
        for account in allocated_accounts:
            allocated_nodes.append(ClusterNode(account, slot_id=self._id_supplier))
            self._id_supplier += 1
        self._in_use_nodes.add_nodes(allocated_nodes)
        return allocated_nodes

    def free_single(self, node):
        """Return a single node's account to the available pool and close it."""
        self._in_use_nodes.remove_node(node)
        self._available_accounts.add_node(node.account)
        node.account.close()

    def _externally_routable_ip(self, account):
        # Subclasses may override to compute a routable IP for an account.
        return None

    def available(self):
        return ClusterSpec.from_nodes(self._available_accounts)

    def used(self):
        return ClusterSpec.from_nodes(self._in_use_nodes)
def __init__(self, nodes):
    """:param nodes: the fixed pool of nodes this subcluster may hand out."""
    super(FiniteSubcluster, self).__init__()
    self.nodes = nodes
    self._available_nodes = NodeContainer(nodes)
    self._in_use_nodes = NodeContainer()
def __init__(self, cluster_json=None, *args, make_remote_account_func=make_remote_account, **kwargs):
    """Initialize JsonCluster

    JsonCluster can be initialized from:

        - a json-serializeable dict
        - a "cluster_file" containing json

    :param cluster_json: a json-serializeable dict containing node information.
        If ``cluster_json`` is None, load from file
    :param make_remote_account_func: factory used to build a remote account for
        each node entry
    :param cluster_file (optional): Overrides the default location of the json
        cluster file

    Example json with a local Vagrant cluster::

        {
          "nodes": [
            {
              "externally_routable_ip": "192.168.50.151",
              "ssh_config": {
                "host": "worker1",
                "hostname": "127.0.0.1",
                "identityfile": "/path/to/private_key",
                "password": null,
                "port": 2222,
                "user": "******"
              }
            },
            {
              "externally_routable_ip": "192.168.50.151",
              "ssh_config": {
                "host": "worker2",
                "hostname": "127.0.0.1",
                "identityfile": "/path/to/private_key",
                "password": null,
                "port": 2223,
                "user": "******"
              }
            }
          ]
        }
    """
    super(JsonCluster, self).__init__()
    self._available_accounts: NodeContainer = NodeContainer()
    self._bad_accounts: NodeContainer = NodeContainer()
    self._in_use_nodes: NodeContainer = NodeContainer()
    if cluster_json is None:
        # This is a direct instantiation of JsonCluster rather than from a
        # subclass (e.g. VagrantCluster): load node info from the cluster file.
        cluster_file = kwargs.get("cluster_file")
        if cluster_file is None:
            cluster_file = ConsoleDefaults.CLUSTER_FILE
        cluster_json = json.load(open(os.path.abspath(cluster_file)))
    try:
        for ninfo in cluster_json["nodes"]:
            ssh_config_dict = ninfo.get("ssh_config")
            assert ssh_config_dict is not None, \
                "Cluster json has a node without a ssh_config field: %s\n Cluster json: %s" % (ninfo, cluster_json)
            ssh_config = RemoteAccountSSHConfig(
                **ninfo.get("ssh_config", {}))
            remote_account = \
                make_remote_account_func(ssh_config, ninfo.get("externally_routable_ip"),
                                         ssh_exception_checks=kwargs.get("ssh_exception_checks"))
            # Fall back to a computed routable IP when none was supplied.
            if remote_account.externally_routable_ip is None:
                remote_account.externally_routable_ip = self._externally_routable_ip(
                    remote_account)
            self._available_accounts.add_node(remote_account)
    except BaseException as e:
        # Any malformed entry invalidates the whole cluster definition.
        msg = "JSON cluster definition invalid: %s: %s" % (
            e, traceback.format_exc(limit=16))
        raise ValueError(msg)
    # Monotonic counter used to assign slot ids to allocated nodes.
    self._id_supplier = 0