def test_instance_context_manager(self):
    """Exercise InstanceInfoManager sequentially and under ThreadGroup.

    First enters the manager once per fake instance and runs the checks
    inline; then repeats the pattern while spawning the checks into a
    thread group, to verify the context info survives both modes.
    """
    instances = [FakeInstance() for _ in range(50)]

    # Sequential: each iteration gets its own info context.
    for inst in instances:
        instance_info = [None, inst.id, inst.name, None]
        with context.InstanceInfoManager(instance_info):
            self._make_checks(inst, sleep=False)

    # Parallel: checks are spawned from inside the info context.
    with context.ThreadGroup() as tg:
        for inst in instances:
            instance_info = [None, inst.id, inst.name, None]
            with context.InstanceInfoManager(instance_info):
                tg.spawn("make_checks", self._make_checks, inst)
def _start_instance(self, cluster, node_group, idx, aa_group,
                    old_aa_groups=None):
    """Boot one instance of *node_group* and return its id.

    While the instance is being launched, the thread-local context is
    tagged with an InstanceInfo describing the new instance so that
    progress/logging machinery can attribute work to it.
    """
    name = g.generate_instance_name(cluster.name, node_group.name, idx)
    info = context.InstanceInfo(cluster.id, None, name, node_group.id)
    # Returning from inside the manager still runs its __exit__.
    with context.InstanceInfoManager(info):
        return self._run_instance(
            cluster, node_group, idx,
            aa_group=aa_group, old_aa_groups=old_aa_groups)
def launch_instances(self, cluster, target_count):
    """Create, await and configure instances up to *target_count*.

    Walks the cluster through the three provisioning stages in
    ``self.STAGES``: create the Heat stack, wait for instances and
    networking, then mount volumes and configure the instances.
    """
    # create all instances
    cluster = g.change_cluster_status(cluster, self.STAGES[0])
    cpo.add_provisioning_step(cluster.id, _("Create Heat stack"), 1)

    # Consistency fix: build a proper context.InstanceInfo (as
    # _start_instance does) instead of passing a bare 4-element list.
    with context.InstanceInfoManager(
            context.InstanceInfo(cluster.id, None, None, None)):
        self.create_instances(cluster, target_count)

    # wait for all instances are up and networks ready
    cluster = g.change_cluster_status(cluster, self.STAGES[1])
    instances = g.get_instances(cluster, self.inst_ids)
    self._await_networks(cluster, instances)

    # prepare all instances
    cluster = g.change_cluster_status(cluster, self.STAGES[2])
    instances = g.get_instances(cluster, self.inst_ids)
    volumes.mount_to_instances(instances)
    self._configure_instances(cluster)