class MemRepositoryTests(unittest.TestCase):
    """Unit tests for the in-memory cluster repository (`MemRepository`).

    Uses `unittest` assertion methods instead of bare ``assert``
    statements: bare asserts are stripped under ``python -O`` (making the
    tests vacuous) and report no diagnostic context on failure.
    """

    def setUp(self):
        # Fresh repository per test so state never leaks between tests.
        self.storage = MemRepository()

    def test_get_all(self):
        """`get_all` returns every cluster that was saved."""
        clusters = [FakeCluster("test_%d" % i) for i in range(10)]
        for cluster in clusters:
            self.storage.save_or_update(cluster)
        new_clusters = self.storage.get_all()
        for cluster in new_clusters:
            # assertIn names the offending element on failure.
            self.assertIn(cluster, clusters)

    def test_get(self):
        """`get` retrieves each saved cluster by its name."""
        clusters = [FakeCluster("test_%d" % i) for i in range(10)]
        for cluster in clusters:
            self.storage.save_or_update(cluster)
        new_clusters = [self.storage.get(cluster.name)
                        for cluster in clusters]
        for cluster in new_clusters:
            self.assertIn(cluster, clusters)

    def test_delete(self):
        """`delete` removes the cluster from the backing store."""
        cluster = FakeCluster("test1")
        self.storage.save_or_update(cluster)
        self.assertIn(cluster.name, self.storage.clusters)
        self.storage.delete(cluster)
        self.assertNotIn(cluster.name, self.storage.clusters)
class MemRepositoryTests(unittest.TestCase):
    """Unit tests for the in-memory cluster repository (`MemRepository`).

    Uses `unittest` assertion methods instead of bare ``assert``
    statements: bare asserts are stripped under ``python -O`` (making the
    tests vacuous) and report no diagnostic context on failure.
    """

    def setUp(self):
        # Fresh repository per test so state never leaks between tests.
        self.storage = MemRepository()

    def test_get_all(self):
        """`get_all` returns every cluster that was saved."""
        clusters = [Cluster('test_%d' % i) for i in range(10)]
        for cluster in clusters:
            self.storage.save_or_update(cluster)
        new_clusters = self.storage.get_all()
        for cluster in new_clusters:
            # assertIn names the offending element on failure.
            self.assertIn(cluster, clusters)

    def test_get(self):
        """`get` retrieves each saved cluster by its name."""
        clusters = [Cluster('test_%d' % i) for i in range(10)]
        for cluster in clusters:
            self.storage.save_or_update(cluster)
        new_clusters = [self.storage.get(cluster.name)
                        for cluster in clusters]
        for cluster in new_clusters:
            self.assertIn(cluster, clusters)

    def test_delete(self):
        """`delete` removes the cluster from the backing store."""
        cluster = Cluster('test1')
        self.storage.save_or_update(cluster)
        self.assertIn(cluster.name, self.storage.clusters)
        self.storage.delete(cluster)
        self.assertNotIn(cluster.name, self.storage.clusters)
class MemRepositoryTests(unittest.TestCase):
    """Unit tests for the in-memory cluster repository (`MemRepository`).

    Replaces ``nt.assert_true(x in y)`` (nose) with the class's own
    ``assertIn``/``assertNotIn``: the class already extends
    `unittest.TestCase`, nose is unmaintained, and ``assertIn`` reports
    the missing element on failure whereas ``assert_true`` only reports
    ``False is not true``.
    """

    def setUp(self):
        # Fresh repository per test so state never leaks between tests.
        self.storage = MemRepository()

    def test_get_all(self):
        """`get_all` returns every cluster that was saved."""
        clusters = [FakeCluster('test_%d' % i) for i in range(10)]
        for cluster in clusters:
            self.storage.save_or_update(cluster)
        new_clusters = self.storage.get_all()
        for cluster in new_clusters:
            self.assertIn(cluster, clusters)

    def test_get(self):
        """`get` retrieves each saved cluster by its name."""
        clusters = [FakeCluster('test_%d' % i) for i in range(10)]
        for cluster in clusters:
            self.storage.save_or_update(cluster)
        new_clusters = [self.storage.get(cluster.name)
                        for cluster in clusters]
        for cluster in new_clusters:
            self.assertIn(cluster, clusters)

    def test_delete(self):
        """`delete` removes the cluster from the backing store."""
        cluster = FakeCluster('test1')
        self.storage.save_or_update(cluster)
        self.assertIn(cluster.name, self.storage.clusters)
        self.storage.delete(cluster)
        self.assertNotIn(cluster.name, self.storage.clusters)
def __init__(self, name, cloud_provider, setup_provider, user_key_name,
             user_key_public, user_key_private, repository=None, **extra):
    """Initialize a cluster's identity, providers, keys and node registry.

    :param name: cluster name.
    :param cloud_provider: provider used to start/stop nodes.
    :param setup_provider: provider used to configure nodes.
    :param user_key_name: name of the key registered with the cloud.
    :param user_key_public: path to the public SSH key; ``~`` and
        environment variables are expanded.
    :param user_key_private: path to the private SSH key; environment
        variables and ``~`` are expanded.
    :param repository: storage backend; a fresh in-memory repository is
        used when none (or a falsy value) is given.
    :param extra: additional keyword options, kept in ``self.extra``;
        ``ssh_to`` is also mirrored to its own attribute.
    """
    self.name = name
    self._cloud_provider = cloud_provider
    self._setup_provider = setup_provider
    self._user_key_name = user_key_name
    self.repository = repository or MemRepository()
    self.ssh_to = extra.get('ssh_to')
    self.extra = extra.copy()
    self.nodes = dict()
    # Same expansion order as before: vars then "~" for the private key,
    # "~" then vars for the public key.
    self.user_key_private = os.path.expanduser(
        os.path.expandvars(user_key_private))
    self._user_key_public = os.path.expandvars(
        os.path.expanduser(user_key_public))
def __init__(self, name, cloud_provider, setup_provider, user_key_name,
             user_key_public, user_key_private, repository=None,
             thread_pool_max_size=10, **extra):
    """Initialize a cluster's identity, providers, keys and node registry.

    :param name: cluster name.
    :param cloud_provider: provider used to start/stop nodes.
    :param setup_provider: provider used to configure nodes.
    :param user_key_name: name of the key registered with the cloud.
    :param user_key_public: path to the public SSH key; ``~`` and
        environment variables are expanded.
    :param user_key_private: path to the private SSH key; environment
        variables and ``~`` are expanded.
    :param repository: storage backend; a fresh in-memory repository is
        used when none (or a falsy value) is given.
    :param thread_pool_max_size: upper bound on worker threads.
    :param extra: additional keyword options, kept in ``self.extra``;
        ``template`` and ``ssh_to`` are also mirrored to attributes.
    """
    self.name = name
    self.template = extra.get('template', None)
    self.thread_pool_max_size = thread_pool_max_size
    self._cloud_provider = cloud_provider
    self._setup_provider = setup_provider
    self._user_key_name = user_key_name
    self.repository = repository or MemRepository()
    # Persistent repositories expose `storage_path`; keep the known-hosts
    # file next to the saved cluster state.  In-memory storage gets none.
    if hasattr(self.repository, 'storage_path'):
        self.known_hosts_file = os.path.join(
            self.repository.storage_path,
            "%s.known_hosts" % self.name)
    else:
        self.known_hosts_file = None
    self.ssh_to = extra.get('ssh_to')
    self.extra = extra.copy()
    self.nodes = dict()
    # Same expansion order as before: vars then "~" for the private key,
    # "~" then vars for the public key.
    self.user_key_private = os.path.expanduser(
        os.path.expandvars(user_key_private))
    self._user_key_public = os.path.expandvars(
        os.path.expanduser(user_key_public))
def __init__(self, name, user_key_name='elasticluster-key',
             user_key_public='~/.ssh/id_rsa.pub',
             user_key_private='~/.ssh/id_rsa',
             cloud_provider=None, setup_provider=None,
             availability_zone='', repository=None,
             start_timeout=600, ssh_probe_timeout=5,
             ssh_proxy_command='', thread_pool_max_size=10, **extra):
    """Initialize a cluster: identity, providers, SSH settings and nodes.

    :param name: cluster name.
    :param user_key_name: name of the key registered with the cloud.
    :param user_key_public: path to the public SSH key; ``~`` and
        environment variables are expanded.
    :param user_key_private: path to the private SSH key; ``~`` and
        environment variables are expanded.
    :param cloud_provider: provider used to start/stop nodes.
    :param setup_provider: provider used to configure nodes.
    :param availability_zone: zone to place nodes in (empty = default).
    :param repository: storage backend; a fresh in-memory repository is
        used when none (or a falsy value) is given.
    :param start_timeout: seconds to wait for nodes to come up.
    :param ssh_probe_timeout: seconds per SSH connection probe.
    :param ssh_proxy_command: SSH ``ProxyCommand`` string, if any.
    :param thread_pool_max_size: upper bound on worker threads.
    :param extra: additional keyword options; ``template``, ``ssh_to``,
        ``nodes`` and ``extra`` are consumed, the remainder is kept in
        ``self.extra`` (attributes already set on ``self`` trump it).

    Fixes over the previous revision:
    - key-path expansion no longer clobbers itself (``expandvars`` result
      was overwritten by ``expanduser`` applied to the raw argument, so
      environment variables in key paths were never expanded);
    - the leftover-``extra`` cleanup no longer deletes keys from the dict
      while iterating it, which raises ``RuntimeError`` on Python 3.
    """
    self.name = name
    self.template = extra.pop('template', None)
    self._cloud_provider = cloud_provider
    self._setup_provider = setup_provider
    self.availability_zone = availability_zone
    self.ssh_probe_timeout = ssh_probe_timeout
    self.ssh_proxy_command = ssh_proxy_command
    self.start_timeout = start_timeout
    self.thread_pool_max_size = thread_pool_max_size
    self.user_key_name = user_key_name
    self.repository = repository if repository else MemRepository()
    self.ssh_to = extra.pop('ssh_to', None)
    # Expand env vars AND "~" on both key paths; chain the calls so the
    # first expansion is not discarded.
    self.user_key_private = os.path.expanduser(
        os.path.expandvars(user_key_private))
    self.user_key_public = os.path.expandvars(
        os.path.expanduser(user_key_public))
    # this needs to exist before `add_node()` is called
    self._naming_policy = NodeNamingPolicy()
    self.nodes = {}
    if 'nodes' in extra:
        # Build the internal nodes. This is mostly useful when loading
        # the cluster from json files.
        for kind, nodes in extra['nodes'].items():
            for node in nodes:
                # adding un-named nodes before NodeNamingPolicy has
                # been fully populated can lead to duplicate names
                assert 'name' in node
                self.add_node(**node)
        del extra['nodes']
    self.extra = {}
    # FIXME: ugly fix needed when saving and loading the same
    # cluster using json. The `extra` keywords will become a
    # single, dictionary-valued, `extra` option when calling again
    # the constructor.
    self.extra.update(extra.pop('extra', {}))
    # attributes that have already been defined trump whatever is
    # in the `extra` dictionary; snapshot the keys with `list()` so we
    # never delete from the dict while iterating it (RuntimeError on Py3)
    for key in list(extra.keys()):
        if hasattr(self, key):
            del extra[key]
    self.extra.update(extra)
def __init__(self, name, user_key_name='elasticluster-key',
             user_key_public='~/.ssh/id_rsa.pub',
             user_key_private='~/.ssh/id_rsa',
             cloud_provider=None, setup_provider=None,
             repository=None, thread_pool_max_size=10, **extra):
    """Initialize a cluster: identity, providers, SSH keys and nodes.

    :param name: cluster name.
    :param user_key_name: name of the key registered with the cloud.
    :param user_key_public: path to the public SSH key; ``~`` and
        environment variables are expanded.
    :param user_key_private: path to the private SSH key; ``~`` and
        environment variables are expanded.
    :param cloud_provider: provider used to start/stop nodes.
    :param setup_provider: provider used to configure nodes.
    :param repository: storage backend; a fresh in-memory repository is
        used when none (or a falsy value) is given.
    :param thread_pool_max_size: upper bound on worker threads.
    :param extra: additional keyword options; ``template``, ``ssh_to``
        and ``extra`` are consumed, ``nodes`` rebuilds the node registry,
        and the remainder is kept in ``self.extra`` (keys matching an
        attribute already set on ``self`` are dropped).

    Fix over the previous revision: key-path expansion no longer clobbers
    itself — ``expandvars`` was applied to the raw argument and its
    result immediately overwritten by ``expanduser`` of the same raw
    argument, so environment variables in key paths were never expanded.
    """
    self.name = name
    self.template = extra.pop('template', None)
    self.thread_pool_max_size = thread_pool_max_size
    self._cloud_provider = cloud_provider
    self._setup_provider = setup_provider
    self.user_key_name = user_key_name
    self.repository = repository if repository else MemRepository()
    self.ssh_to = extra.pop('ssh_to', None)
    # Expand env vars AND "~" on both key paths; chain the calls so the
    # first expansion is not discarded.
    self.user_key_private = os.path.expanduser(
        os.path.expandvars(user_key_private))
    self.user_key_public = os.path.expandvars(
        os.path.expanduser(user_key_public))
    self.nodes = dict()
    if 'nodes' in extra:
        # Build the internal nodes. This is mostly useful when loading
        # the cluster from json files.
        for kind, nodes in extra['nodes'].items():
            for node in nodes:
                self.add_node(**node)
    self.extra = {}
    # FIXME: ugly fix needed when saving and loading the same
    # cluster using json. The `extra` keywords will become a
    # single, dictionary-valued, `extra` option when calling again
    # the constructor.
    self.extra.update(extra.pop('extra', {}))
    # Keep only extra keys that do not shadow an existing attribute.
    self.extra.update({
        key: value
        for key, value in extra.items()
        if not hasattr(self, key)
    })
def setUp(self):
    # Create a fresh in-memory repository before each test so no state
    # leaks between test methods.
    self.storage = MemRepository()