def _wait_active(self, cluster_object):
    """Block until the given cluster reaches the "active" state.

    Fails fast if the cluster enters the "error" state.

    :param cluster_object: cluster resource to poll
    """
    utils.wait_for(
        resource=cluster_object,
        ready_statuses=["active"],
        failure_statuses=["error"],
        update_resource=self._update_cluster,
        timeout=CONF.benchmark.cluster_create_timeout,
        check_interval=CONF.benchmark.cluster_check_interval)
def _scale_stack(self, stack, output_key, delta):
    """Scale a stack up or down.

    Calls the webhook given in the output value identified by
    'output_key', and waits for the stack size to change by 'delta'.

    :param stack: stack to scale up or down
    :param output_key: The name of the output to get the URL from
    :param delta: The expected change in number of instances in the stack
                  (signed int)
    """
    current = self._count_instances(stack)
    target = current + delta
    LOG.debug("Scaling stack %s from %s to %s instances with %s"
              % (stack.id, current, target, output_key))
    # Both the webhook call and the wait are timed as one atomic action.
    with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key):
        self._stack_webhook(stack, output_key)
        utils.wait_for(
            stack,
            is_ready=lambda s: self._count_instances(s) == target,
            update_resource=utils.get_from_manager(["UPDATE_FAILED"]),
            timeout=CONF.benchmark.heat_stack_scale_timeout,
            check_interval=CONF.benchmark.heat_stack_scale_poll_interval)
def _live_migrate(self, server, target_host, block_migration=False,
                  disk_over_commit=False, skip_host_check=False):
    """Run live migration of the given server.

    :param server: Server object
    :param target_host: Specifies the target compute node to migrate
    :param block_migration: Specifies the migration type
    :param disk_over_commit: Specifies whether to overcommit migrated
                             instance or not
    :param skip_host_check: Specifies whether to verify the targeted host
                            availability
    """
    admin_server = self.admin_clients("nova").servers.get(server.id)
    host_before = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    admin_server.live_migrate(target_host,
                              block_migration=block_migration,
                              disk_over_commit=disk_over_commit)
    utils.wait_for(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_live_migrate_timeout,
        check_interval=(
            CONF.benchmark.nova_server_live_migrate_poll_interval))
    # Re-fetch via the admin client to read the (admin-only) host attr.
    admin_server = self.admin_clients("nova").servers.get(server.id)
    if (host_before == getattr(admin_server, "OS-EXT-SRV-ATTR:host")
            and not skip_host_check):
        raise exceptions.LiveMigrateException(
            "Migration complete but instance did not change host: %s"
            % host_before)
def _associate_floating_ip(self, server, address, fixed_address=None,
                           atomic_action=True):
    """Add floating IP to an instance

    :param server: The :class:`Server` to add an IP to.
    :param address: The ip address or FloatingIP to add to the instance
    :param fixed_address: The fixedIP address the FloatingIP is to be
           associated with (optional)
    :param atomic_action: True if this is an atomic action (optional)
    """
    # The associate+wait sequence was duplicated verbatim in both
    # branches; keep it in one place so the two paths cannot drift.
    def do_associate():
        server.add_floating_ip(address, fixed_address=fixed_address)
        utils.wait_for(
            server,
            is_ready=self.check_ip_address(address),
            update_resource=utils.get_from_manager())

    if atomic_action:
        with atomic.ActionTimer(self, "nova.associate_floating_ip"):
            do_associate()
    else:
        do_associate()
    # Update server data
    server.addresses = server.manager.get(server.id).addresses
def _associate_floating_ip(self, server, address, fixed_address=None,
                           atomic_action=True):
    """Add floating IP to an instance

    :param server: The :class:`Server` to add an IP to.
    :param address: The ip address or FloatingIP to add to the instance
    :param fixed_address: The fixedIP address the FloatingIP is to be
           associated with (optional)
    :param atomic_action: True if this is an atomic action (optional)
    """
    if atomic_action:
        # Record the association as a timed atomic action.
        with atomic.ActionTimer(self, "nova.associate_floating_ip"):
            server.add_floating_ip(address, fixed_address=fixed_address)
            utils.wait_for(server,
                           is_ready=self.check_ip_address(address),
                           update_resource=utils.get_from_manager())
    else:
        server.add_floating_ip(address, fixed_address=fixed_address)
        utils.wait_for(server,
                       is_ready=self.check_ip_address(address),
                       update_resource=utils.get_from_manager())
    # Update server data
    server.addresses = server.manager.get(server.id).addresses
def _migrate(self, server, skip_host_check=False):
    """Run migration of the given server.

    :param server: Server object
    :param skip_host_check: Specifies whether to verify the targeted host
                            availability
    """
    admin_server = self.admin_clients("nova").servers.get(server.id)
    host_before = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    admin_server.migrate()
    utils.wait_for(
        server,
        ready_statuses=["VERIFY_RESIZE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_migrate_timeout,
        check_interval=CONF.benchmark.nova_server_migrate_poll_interval)
    if not skip_host_check:
        # Re-fetch to see which host the server ended up on.
        admin_server = self.admin_clients("nova").servers.get(server.id)
        host_after = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
        if host_before == host_after:
            raise exceptions.MigrateException(
                "Migration complete but instance did not change host: %s"
                % host_before)
def _wait_for_ping(self, server_ip):
    """Poll the given IP with ICMP until it responds or the wait times out.

    :param server_ip: IP address (string) of the server to ping
    """
    ip = netaddr.IPAddress(server_ip)
    utils.wait_for(
        ip,
        is_ready=utils.resource_is(ICMP_UP_STATUS, self._ping_ip_address),
        timeout=CONF.benchmark.vm_ping_timeout,
        check_interval=CONF.benchmark.vm_ping_poll_interval)
def test_wait_successful(self, mock_time, mock_sleep):
    """wait_for() keeps polling until the resource reports "ready"."""
    res = {"status": "not_ready"}
    statuses = ["not_ready", "not_ready_yet", "still_not_ready",
                "almost_ready", "ready"]
    upd = mock.MagicMock(side_effect=[{"status": s} for s in statuses])
    utils.wait_for(resource=res, ready_statuses=["ready"],
                   update_resource=upd)
    # First poll passes the initial resource, then each updated one up to
    # (but not including) the "ready" result.
    expected = [{"status": "not_ready"}]
    expected += [{"status": s} for s in statuses[:-1]]
    upd.assert_has_calls([mock.call(r) for r in expected])
def _scale_stack(self, stack, output_key, delta):
    """Scale a stack up or down.

    Calls the webhook given in the output value identified by
    'output_key', and waits for the stack size to change by 'delta'.

    :param stack: stack to scale up or down
    :param output_key: The name of the output to get the URL from
    :param delta: The expected change in number of instances in the stack
                  (signed int)
    """
    num_instances = self._count_instances(stack)
    expected_instances = num_instances + delta
    LOG.debug("Scaling stack %s from %s to %s instances with %s"
              % (stack.id, num_instances, expected_instances, output_key))
    # Webhook trigger and the wait are timed together as one action.
    with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key):
        self._stack_webhook(stack, output_key)
        utils.wait_for(
            stack,
            is_ready=lambda s: (
                self._count_instances(s) == expected_instances),
            failure_statuses=["UPDATE_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_scale_timeout,
            check_interval=CONF.openstack.heat_stack_scale_poll_interval)
def _resize(instance, client, status, flavor_id):
    """Resize a database instance to a new flavor and wait for completion.

    :param instance: instance (or instance id) to resize
    :param client: client whose ``instances`` manager drives the resize
    :param status: status the instance must reach before resizing
    :param flavor_id: id of the flavor to resize to
    """
    instance = client.instances.get(instance)
    change_status(instance, client, status)
    client.instances.resize_instance(instance, flavor_id)
    # BUG FIX: keyword was misspelled "ready_statuse", so wait_for never
    # received the expected ready state.
    utils.wait_for(instance, update_resource=client.instances.get,
                   ready_statuses=["RESIZE"])
    check_ready(instance)
def _wait_for_ping(self, server_ip):
    """Poll the given IP with ICMP until it responds or the wait times out.

    :param server_ip: IP address (string) of the server to ping
    """
    server_ip = netaddr.IPAddress(server_ip)
    utils.wait_for(
        server_ip,
        is_ready=utils.resource_is(ICMP_UP_STATUS, self._ping_ip_address),
        # Use the configurable timeout and poll interval, consistent with
        # the other ping helpers, instead of a hard-coded 120s wait with
        # the default polling interval.
        timeout=CONF.benchmark.vm_ping_timeout,
        check_interval=CONF.benchmark.vm_ping_poll_interval)
def _resize(instance, client, status, size):
    """Resize a database instance's volume and wait for completion.

    :param instance: instance (or instance id) whose volume is resized
    :param client: client whose ``instances`` manager drives the resize
    :param status: status the instance must reach before resizing
    :param size: new volume size
    """
    instance = client.instances.get(instance)
    change_status(instance, client, status)
    client.instances.resize_volume(instance['id'], size)
    # BUG FIX: keyword was misspelled "ready_statuse", so wait_for never
    # received the expected ready state.
    utils.wait_for(instance, update_resource=client.instances.get,
                   ready_statuses=["RESIZE"])
    check_ready(instance)
def _resize(self, server, flavor):
    """Resize the server to the given flavor and wait for VERIFY_RESIZE.

    :param server: Server object to resize
    :param flavor: target flavor
    """
    server.resize(flavor)
    utils.wait_for(
        server,
        ready_statuses=["VERIFY_RESIZE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_resize_timeout,
        check_interval=CONF.benchmark.nova_server_resize_poll_interval)
def _wait_for_ping(self, server_ip):
    """Wait until the given IP answers ICMP pings.

    :param server_ip: IP address (string) of the server to ping
    """
    addr = netaddr.IPAddress(server_ip)
    utils.wait_for(
        addr,
        is_ready=utils.resource_is(ICMP_UP_STATUS,
                                   self._ping_ip_address),
        timeout=CONF.benchmark.vm_ping_timeout,
        check_interval=CONF.benchmark.vm_ping_poll_interval)
def _resize_revert(self, server, status="ACTIVE"):
    """Revert a pending resize and wait for the expected status.

    :param server: Server object whose resize is reverted
    :param status: status to wait for after the revert
    """
    server.revert_resize()
    utils.wait_for(
        server,
        ready_statuses=[status],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_resize_revert_timeout,
        check_interval=(
            CONF.benchmark.nova_server_resize_revert_poll_interval))
def _resize_confirm(self, server, status="ACTIVE"):
    """Confirm a pending resize and wait for the expected status.

    :param server: Server object whose resize is confirmed
    :param status: status to wait for after the confirmation
    """
    server.confirm_resize()
    utils.wait_for(
        server,
        is_ready=utils.resource_is(status),
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_resize_confirm_timeout,
        check_interval=(
            CONF.benchmark.nova_server_resize_confirm_poll_interval))
def _wait_for_swarm_ping(self, swarm_connection):
    """Wait until the swarm connection reports the "UP ALL" status.

    :param swarm_connection: object whose ``status`` callable is polled
    """
    utils.wait_for(
        None,
        is_ready=utils.resource_is("UP ALL", swarm_connection.status),
        timeout=CONF.benchmark.vm_swarm_ping_timeout,
        check_interval=CONF.benchmark.vm_swarm_ping_poll_interval)
def test_exit_instantly(self, mock_sleep):
    """wait_for() returns at once when the resource is already ready."""
    res = {"status": "ready"}
    upd = mock.MagicMock(return_value=res)
    utils.wait_for(resource=res,
                   ready_statuses=["ready"],
                   update_resource=upd)
    # Exactly one poll, and no sleeping in between.
    upd.assert_called_once_with(res)
    self.assertFalse(mock_sleep.called)
def _do_server_reboot(self, server, reboottype):
    """Reboot the server and wait until it is ACTIVE again.

    :param server: Server object to reboot
    :param reboottype: reboot type passed to nova ("SOFT"/"HARD")
    """
    server.reboot(reboot_type=reboottype)
    # Give nova a moment to flip the status before we start polling.
    time.sleep(CONF.benchmark.nova_server_reboot_prepoll_delay)
    utils.wait_for(
        server,
        is_ready=utils.resource_is("ACTIVE"),
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_reboot_timeout,
        check_interval=CONF.benchmark.nova_server_reboot_poll_interval)
def _wait_for_ping_linux(self, server_ip):
    """Wait until the given IP reports the ICMP "up" status.

    :param server_ip: IP address (string) of the server to ping
    """
    addr = netaddr.IPAddress(server_ip)
    utils.wait_for(
        addr,
        ready_statuses=[Host.ICMP_UP_STATUS],
        update_resource=Host.update_status,
        timeout=CONF.benchmark.vm_ping_timeout,
        check_interval=CONF.benchmark.vm_ping_poll_interval)
def _resize(self, server, flavor):
    """Resize the given server and wait for the VERIFY_RESIZE state.

    :param server: Server object to resize
    :param flavor: target flavor
    """
    server.resize(flavor)
    utils.wait_for(
        server,
        ready_statuses=["VERIFY_RESIZE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_resize_timeout,
        check_interval=CONF.benchmark.nova_server_resize_poll_interval)
def _do_server_reboot(self, server, reboottype):
    """Reboot a server and block until it is ACTIVE again.

    :param server: Server object to reboot
    :param reboottype: reboot type passed to nova ("SOFT"/"HARD")
    """
    server.reboot(reboot_type=reboottype)
    # Short fixed delay before polling so the status has time to change.
    time.sleep(CONF.benchmark.nova_server_reboot_prepoll_delay)
    utils.wait_for(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_reboot_timeout,
        check_interval=CONF.benchmark.nova_server_reboot_poll_interval)
def _wait_active(self, cluster_object):
    """Poll the cluster until it is "active"; fail fast on "error".

    :param cluster_object: Sahara cluster resource to poll
    """
    utils.wait_for(
        resource=cluster_object,
        ready_statuses=["active"],
        failure_statuses=["error"],
        update_resource=self._update_cluster,
        timeout=CONF.benchmark.sahara_cluster_create_timeout,
        check_interval=CONF.benchmark.sahara_cluster_check_interval)
def setup(self):
    """Launch one Sahara cluster per tenant, then wait for all of them."""
    utils.init_sahara_context(self)
    self.context["sahara"]["clusters"] = {}

    wait_dict = {}
    for user, tenant_id in rutils.iterate_per_tenants(
            self.context["users"]):
        image_id = self.context["tenants"][tenant_id]["sahara"]["image"]
        floating_ip_pool = self.config.get("floating_ip_pool")

        # A per-tenant context so the scenario uses the right credentials.
        temporary_context = {
            "user": user,
            "tenant": self.context["tenants"][tenant_id],
            "task": self.context["task"],
            "owner_id": self.context["owner_id"]
        }
        scenario = utils.SaharaScenario(context=temporary_context)

        cluster = scenario._launch_cluster(
            plugin_name=self.config["plugin_name"],
            hadoop_version=self.config["hadoop_version"],
            flavor_id=self.config.get("flavor_id"),
            master_flavor_id=self.config["master_flavor_id"],
            worker_flavor_id=self.config["worker_flavor_id"],
            workers_count=self.config["workers_count"],
            image_id=image_id,
            floating_ip_pool=floating_ip_pool,
            volumes_per_node=self.config.get("volumes_per_node"),
            volumes_size=self.config.get("volumes_size", 1),
            auto_security_group=self.config.get("auto_security_group",
                                                True),
            security_groups=self.config.get("security_groups"),
            node_configs=self.config.get("node_configs"),
            cluster_configs=self.config.get("cluster_configs"),
            enable_anti_affinity=self.config.get("enable_anti_affinity",
                                                 False),
            enable_proxy=self.config.get("enable_proxy", False),
            wait_active=False,
            use_autoconfig=self.config.get("use_autoconfig", True))

        self.context["tenants"][tenant_id]["sahara"]["cluster"] = cluster.id

        # Need to save the client instance to poll for active status
        wait_dict[cluster] = scenario.clients("sahara")

    bench_utils.wait_for(
        resource=wait_dict,
        update_resource=self.update_clusters_dict,
        is_ready=self.all_clusters_active,
        timeout=CONF.benchmark.sahara_cluster_create_timeout,
        check_interval=CONF.benchmark.sahara_cluster_check_interval)
def setup(self):
    """Launch a Sahara cluster for every tenant and wait for them all."""
    utils.init_sahara_context(self)
    self.context["sahara"]["clusters"] = {}

    wait_dict = {}
    for user, tenant_id in rutils.iterate_per_tenants(
            self.context["users"]):
        image_id = self.context["tenants"][tenant_id]["sahara"]["image"]
        floating_ip_pool = self.config.get("floating_ip_pool")

        # Scenario helpers need a per-tenant context to select the right
        # user and tenant.
        temporary_context = {
            "user": user,
            "tenant": self.context["tenants"][tenant_id],
            "task": self.context["task"],
            "owner_id": self.context["owner_id"]
        }
        scenario = utils.SaharaScenario(context=temporary_context)

        cluster = scenario._launch_cluster(
            plugin_name=self.config["plugin_name"],
            hadoop_version=self.config["hadoop_version"],
            flavor_id=self.config.get("flavor_id"),
            master_flavor_id=self.config["master_flavor_id"],
            worker_flavor_id=self.config["worker_flavor_id"],
            workers_count=self.config["workers_count"],
            image_id=image_id,
            floating_ip_pool=floating_ip_pool,
            volumes_per_node=self.config.get("volumes_per_node"),
            volumes_size=self.config.get("volumes_size", 1),
            auto_security_group=self.config.get("auto_security_group",
                                                True),
            security_groups=self.config.get("security_groups"),
            node_configs=self.config.get("node_configs"),
            cluster_configs=self.config.get("cluster_configs"),
            enable_anti_affinity=self.config.get("enable_anti_affinity",
                                                 False),
            enable_proxy=self.config.get("enable_proxy", False),
            wait_active=False,
            use_autoconfig=self.config.get("use_autoconfig", True))

        self.context["tenants"][tenant_id]["sahara"]["cluster"] = cluster.id

        # Need to save the client instance to poll for active status
        wait_dict[cluster] = scenario.clients("sahara")

    bench_utils.wait_for(
        resource=wait_dict,
        update_resource=self.update_clusters_dict,
        is_ready=self.all_clusters_active,
        timeout=CONF.openstack.sahara_cluster_create_timeout,
        check_interval=CONF.openstack.sahara_cluster_check_interval)
def _resize_revert(self, server, status="ACTIVE"):
    """Revert a server resize and wait until the given status is reached.

    :param server: Server object whose resize is reverted
    :param status: status to wait for after the revert
    """
    server.revert_resize()
    utils.wait_for(
        server,
        ready_statuses=[status],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_resize_revert_timeout,
        check_interval=(
            CONF.benchmark.nova_server_resize_revert_poll_interval))
def _detach_volume(self, server, volume):
    """Detach a volume from a server and wait until it is available.

    :param server: server the volume is attached to
    :param volume: volume to detach
    """
    self.clients("nova").volumes.delete_server_volume(server.id,
                                                      volume.id)
    utils.wait_for(
        volume,
        ready_statuses=["available"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_detach_volume_timeout,
        check_interval=CONF.benchmark.nova_detach_volume_poll_interval)
def _resize_confirm(self, server, status="ACTIVE"):
    """Confirm a server resize and wait until the given status is reached.

    :param server: Server object whose resize is confirmed
    :param status: status to wait for after the confirmation
    """
    server.confirm_resize()
    utils.wait_for(
        server,
        is_ready=utils.resource_is(status),
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_resize_confirm_timeout,
        check_interval=(
            CONF.benchmark.nova_server_resize_confirm_poll_interval))
def _do_server_reboot(self, server, reboottype):
    """Reboot the server and wait until it is ACTIVE again.

    :param server: Server object to reboot
    :param reboottype: reboot type passed to nova ("SOFT"/"HARD")
    """
    server.reboot(reboot_type=reboottype)
    # FIX: this reboot helper slept for the *pause* prepoll delay; use the
    # reboot-specific option like the sibling implementations do.
    self.sleep_between(CONF.benchmark.nova_server_reboot_prepoll_delay)
    utils.wait_for(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_reboot_timeout,
        check_interval=CONF.benchmark.nova_server_reboot_poll_interval)
def _create_nova_vm(self, nova_client, flavor, image, keypair, server_name,
                    sec_group_name, nova_server_boot_timeout, **kwargs):
    """Create nova instance

    :param nova_client: nova client
    :param flavor: int, flavor for VM instance
    :param image: str/uuid, image_name/image_id of the new instance
    :param keypair: str, key-pair to allow ssh
    :param server_name: str, name for VM instance
    :param sec_group_name: str, name of the security group to reuse/create
    :param nova_server_boot_timeout: int, max time for instance to go active
    :return: new nova instance
    """
    # Reuse the security group if it exists; otherwise create it together
    # with an ssh (tcp/22) ingress rule.
    secgroup = None
    for sec in nova_client.security_groups.list():
        if sec.name == sec_group_name:
            secgroup = sec
            LOG.info("Security group already present")
            break
    if secgroup is None:
        LOG.info("Adding new security group")
        secgroup = nova_client.security_groups.create(sec_group_name,
                                                      sec_group_name)
        # add new rule
        nova_client.security_group_rules.create(secgroup.id,
                                                from_port=22,
                                                to_port=22,
                                                ip_protocol="tcp",
                                                cidr="0.0.0.0/0")

    # boot new nova instance
    LOG.info("Booting new instance: %s", server_name)
    server = nova_client.servers.create(
        server_name,
        image=image,
        flavor=flavor,
        key_name=keypair.name,
        # NOTE(review): when no group exists this passes [None]; confirm
        # that is intended (secgroup is always set at this point).
        security_groups=[secgroup.id if secgroup else None],
        **kwargs)

    # wait for instance to become active
    LOG.info("Waiting for instance to become active")
    task_utils.wait_for(server,
                        is_ready=task_utils.resource_is("ACTIVE"),
                        update_resource=task_utils.get_from_manager(),
                        timeout=nova_server_boot_timeout)

    # NOTE(review): assert is stripped under "python -O"; a raised
    # exception would be a safer post-condition check.
    assert ('ACTIVE' == server.status), (
        "The instance is not in ACTIVE state")
    return server
def run(self):
    """Start a Sahara job execution and wait until it finishes."""
    job_execution = self.clients("sahara").job_executions.create(
        job_id=job_id,
        cluster_id=cluster_id,
        input_id=input_id,
        output_id=output_id,
        configs=configs)
    # Poll by execution id; the helper looks up the execution itself.
    utils.wait_for(
        resource=job_execution.id,
        is_ready=self._job_execution_is_finished,
        timeout=CONF.benchmark.sahara_job_execution_timeout,
        check_interval=CONF.benchmark.sahara_job_check_interval)
def run(self):
    """Start a Sahara job execution and wait for it to complete."""
    job_execution = self.clients("sahara").job_executions.create(
        job_id=job_id,
        cluster_id=cluster_id,
        input_id=input_id,
        output_id=output_id,
        configs=configs)
    utils.wait_for(
        resource=job_execution.id,
        is_ready=self._job_execution_is_finished,
        timeout=CONF.benchmark.job_execution_timeout,
        check_interval=CONF.benchmark.job_check_interval)
def _detach_volume(self, server, volume):
    """Detach a volume from a server and wait until it is "available".

    :param server: server the volume is attached to
    :param volume: volume to detach
    """
    self.clients("nova").volumes.delete_server_volume(server.id,
                                                      volume.id)
    utils.wait_for(
        volume,
        is_ready=utils.resource_is("available"),
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_detach_volume_timeout,
        check_interval=CONF.benchmark.nova_detach_volume_poll_interval)
def _attach_volume(self, server, volume, device=None):
    """Attach a volume to a server and wait until it is "in-use".

    :param server: server to attach the volume to
    :param volume: volume to attach
    :param device: device name to expose inside the guest (optional)
    """
    self.clients("nova").volumes.create_server_volume(
        server.id, volume.id, device)
    # NOTE(review): reuses the resize-revert timeout/poll options; an
    # attach-specific option may be preferable if one exists.
    utils.wait_for(
        volume,
        ready_statuses=["in-use"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_resize_revert_timeout,
        check_interval=(
            CONF.benchmark.nova_server_resize_revert_poll_interval))
def _restart(instance, client, status):
    """Restart a database instance and wait for it to come back.

    :param instance: instance (or instance id) to restart
    :param client: client whose ``instances`` manager drives the restart
    :param status: current status; configurations are passed to
                   change_status only when status is "RESTART_REQUIRED"
    """
    instance = client.instances.get(instance)
    change_status(
        instance, status,
        client.configurations if status == "RESTART_REQUIRED" else None)
    # BUG FIX: "instance.restart" was a bare attribute access that never
    # invoked the method, so no restart was ever issued.
    instance.restart()
    # BUG FIX: keyword was misspelled "ready_statuse".
    utils.wait_for(instance, ready_statuses=["REBOOT"],
                   update_resource=client.instances.get)
    check_ready(instance)
def _restore_stack(self, stack, snapshot_id):
    """Restores stack from given snapshot.

    :param stack: stack that will be restored from snapshot
    :param snapshot_id: id of given snapshot
    """
    self.clients("heat").stacks.restore(stack.id, snapshot_id)
    utils.wait_for(
        stack,
        ready_statuses=["RESTORE_COMPLETE"],
        update_resource=utils.get_from_manager(["RESTORE_FAILED"]),
        timeout=CONF.benchmark.heat_stack_restore_timeout,
        check_interval=CONF.benchmark.heat_stack_restore_poll_interval)
def _resume_stack(self, stack):
    """Resume given stack.

    :param stack: stack that needs to be resumed
    """
    self.clients("heat").actions.resume(stack.id)
    utils.wait_for(
        stack,
        ready_statuses=["RESUME_COMPLETE"],
        update_resource=utils.get_from_manager(["RESUME_FAILED"]),
        timeout=CONF.benchmark.heat_stack_resume_timeout,
        check_interval=CONF.benchmark.heat_stack_resume_poll_interval)
def _dissociate_floating_ip(self, server, address):
    """Remove floating IP from an instance

    :param server: The :class:`Server` to add an IP to.
    :param address: The ip address or FloatingIP to remove
    """
    server.remove_floating_ip(address)
    # Wait until the address is no longer reported by the server.
    utils.wait_for(
        server,
        is_ready=self.check_ip_address(address, must_exist=False),
        update_resource=utils.get_from_manager())
    # Update server data
    server.addresses = server.manager.get(server.id).addresses
def _suspend_stack(self, stack):
    """Suspend given stack.

    :param stack: stack that needs to be suspended
    """
    self.clients("heat").actions.suspend(stack.id)
    utils.wait_for(
        stack,
        ready_statuses=["SUSPEND_COMPLETE"],
        update_resource=utils.get_from_manager(["SUSPEND_FAILED"]),
        timeout=CONF.benchmark.heat_stack_suspend_timeout,
        check_interval=CONF.benchmark.heat_stack_suspend_poll_interval)
def _delete_cluster(self, cluster):
    """Delete cluster.

    :param cluster: cluster to delete
    """
    LOG.debug("Deleting cluster `%s`" % cluster.name)
    self.clients("sahara").clusters.delete(cluster.id)
    # Poll until the cluster can no longer be found.
    utils.wait_for(
        resource=cluster,
        is_ready=self._is_cluster_deleted,
        timeout=CONF.benchmark.cluster_delete_timeout,
        check_interval=CONF.benchmark.cluster_check_interval)
def _deploy_environment(self, environment, session):
    """Deploy environment.

    :param environment: Environment instance
    :param session: Session instance
    """
    self.clients("murano").sessions.deploy(environment.id, session.id)
    utils.wait_for(
        environment,
        is_ready=utils.resource_is("READY"),
        update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
        timeout=CONF.benchmark.deploy_environment_timeout,
        check_interval=CONF.benchmark.deploy_environment_check_interval)
def _detach_volume(self, server, volume, attachment=None):
    """Detach a volume from a server and wait until it is available.

    :param server: server the volume is attached to
    :param volume: volume to detach
    :param attachment: attachment object to detach by (optional)
    """
    # NOTE(chenhb): Recommend the use of attachment.The use of
    # volume.id is retained mainly for backwoard compatible.
    if attachment:
        attachment_id = attachment.id
    else:
        attachment_id = volume.id
    self.clients("nova").volumes.delete_server_volume(server.id,
                                                      attachment_id)
    utils.wait_for(
        volume,
        ready_statuses=["available"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_detach_volume_timeout,
        check_interval=CONF.benchmark.nova_detach_volume_poll_interval)
def _dissociate_floating_ip(self, server, address):
    """Remove floating IP from an instance

    :param server: The :class:`Server` to add an IP to.
    :param address: The ip address or FloatingIP to remove
    """
    server.remove_floating_ip(address)
    utils.wait_for(server,
                   is_ready=self.check_ip_address(address,
                                                  must_exist=False),
                   update_resource=utils.get_from_manager())
    # Update server data
    server.addresses = server.manager.get(server.id).addresses
def _attach_volume(self, server, volume, device=None):
    """Attach a volume to a server and wait until it is "in-use".

    :param server: server to attach the volume to
    :param volume: volume to attach
    :param device: device name to expose inside the guest (optional)
    :returns: the created volume attachment
    """
    attachment = self.clients("nova").volumes.create_server_volume(
        server.id, volume.id, device)
    utils.wait_for(
        volume,
        ready_statuses=["in-use"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_resize_revert_timeout,
        check_interval=(
            CONF.benchmark.nova_server_resize_revert_poll_interval))
    return attachment
def _check_stack(self, stack):
    """Check given stack.

    Check the stack and stack resources.

    :param stack: stack that needs to be checked
    """
    self.clients("heat").actions.check(stack.id)
    utils.wait_for(
        stack,
        ready_statuses=["CHECK_COMPLETE"],
        update_resource=utils.get_from_manager(["CHECK_FAILED"]),
        timeout=CONF.benchmark.heat_stack_check_timeout,
        check_interval=CONF.benchmark.heat_stack_check_poll_interval)
def _check_stack(self, stack):
    """Check given stack.

    Check the stack and stack resources.

    :param stack: stack that needs to be checked
    """
    self.clients("heat").actions.check(stack.id)
    utils.wait_for(
        stack,
        is_ready=utils.resource_is("CHECK_COMPLETE"),
        update_resource=utils.get_from_manager(["CHECK_FAILED"]),
        timeout=CONF.benchmark.heat_stack_check_timeout,
        check_interval=CONF.benchmark.heat_stack_check_poll_interval)
def _suspend_stack(self, stack):
    """Suspend given stack.

    :param stack: stack that needs to be suspended
    """
    self.clients("heat").actions.suspend(stack.id)
    utils.wait_for(
        stack,
        ready_statuses=["SUSPEND_COMPLETE"],
        update_resource=utils.get_from_manager(["SUSPEND_FAILED"]),
        timeout=CONF.benchmark.heat_stack_suspend_timeout,
        check_interval=CONF.benchmark.heat_stack_suspend_poll_interval)
def _associate_floating_ip(self, server, address, fixed_address=None):
    """Add floating IP to an instance

    :param server: The :class:`Server` to add an IP to.
    :param address: The ip address or FloatingIP to add to the instance
    :param fixed_address: The fixedIP address the FloatingIP is to be
           associated with (optional)
    """
    server.add_floating_ip(address, fixed_address=fixed_address)
    # Poll until the server reports the new address.
    utils.wait_for(
        server,
        is_ready=self.check_ip_address(address),
        update_resource=utils.get_from_manager())
    # Update server data
    server.addresses = server.manager.get(server.id).addresses
def _resume_stack(self, stack):
    """Resume given stack.

    :param stack: stack that needs to be resumed
    """
    self.clients("heat").actions.resume(stack.id)
    utils.wait_for(
        stack,
        ready_statuses=["RESUME_COMPLETE"],
        update_resource=utils.get_from_manager(["RESUME_FAILED"]),
        timeout=CONF.benchmark.heat_stack_resume_timeout,
        check_interval=CONF.benchmark.heat_stack_resume_poll_interval)
def test_wait_successful(self, mock_time, mock_sleep):
    """wait_for() polls repeatedly until "ready" shows up."""
    res = {"status": "not_ready"}
    sequence = ["not_ready", "not_ready_yet", "still_not_ready",
                "almost_ready", "ready"]
    upd = mock.MagicMock(side_effect=[{"status": s} for s in sequence])
    utils.wait_for(resource=res,
                   ready_statuses=["ready"],
                   update_resource=upd)
    # The initial resource is polled first, then every update except the
    # final "ready" one.
    polled = [{"status": "not_ready"}]
    polled += [{"status": s} for s in sequence[:-1]]
    upd.assert_has_calls([mock.call(r) for r in polled])
def _deploy_environment(self, environment, session):
    """Deploy environment.

    :param environment: Environment instance
    :param session: Session instance
    """
    self.clients("murano").sessions.deploy(environment.id, session.id)
    # "DEPLOY FAILURE" is treated as a terminal failure status.
    utils.wait_for(
        environment,
        is_ready=utils.resource_is("READY"),
        update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
        timeout=CONF.benchmark.deploy_environment_timeout,
        check_interval=CONF.benchmark.deploy_environment_check_interval)
def create_servers(self):
    """Create VMs with chosen image."""
    image_uuid = self.get_image_uuid()
    userdata = self.get_userdata()
    flavor = self.config["flavor_id"]
    nics = self.get_nics()

    keypair, public_key_path = self.create_keypair()
    self.create_security_group_and_rules()
    sg_args = {"security_groups": [self.sg.name]} if self.sg else {}

    # Boot the requested number of servers and register them as resources.
    os_servers = []
    for i in range(self.config.get("amount", 1)):
        name = "%s-%d" % (self.config["deployment_name"], i)
        server = self.nova.servers.create(
            name, image_uuid, flavor,
            nics=nics,
            key_name=keypair.name,
            userdata=userdata,
            config_drive=self.config.get("config_drive", False),
            **sg_args)
        os_servers.append(server)
        self.resources.create({"id": server.id}, type=SERVER_TYPE)

    kwargs = {
        "ready_statuses": ["ACTIVE"],
        "update_resource": utils.get_from_manager(),
        "timeout": 120,
        "check_interval": 5
    }

    # Wait for each server to come up, then wrap it in a provider Server.
    servers = []
    for os_server in os_servers:
        os_server = utils.wait_for(os_server, **kwargs)
        servers.append(provider.Server(host=_get_address(os_server),
                                       user="******",
                                       key=public_key_path))

    for s in servers:
        s.ssh.wait(timeout=120, interval=5)

    if self.config.get("wait_for_cloud_init", False):
        for s in servers:
            utils.wait_for(s, is_ready=_cloud_init_success)

    return servers
def _delete_cluster(self, cluster):
    """Delete cluster.

    :param cluster: cluster to delete
    """
    LOG.debug("Deleting cluster `%s`" % cluster.name)
    self.clients("sahara").clusters.delete(cluster.id)
    utils.wait_for(
        resource=cluster,
        is_ready=self._is_cluster_deleted,
        timeout=CONF.benchmark.cluster_delete_timeout,
        check_interval=CONF.benchmark.cluster_check_interval)
def _restore_stack(self, stack, snapshot_id):
    """Restores stack from given snapshot.

    :param stack: stack that will be restored from snapshot
    :param snapshot_id: id of given snapshot
    """
    self.clients("heat").stacks.restore(stack.id, snapshot_id)
    utils.wait_for(
        stack,
        is_ready=utils.resource_is("RESTORE_COMPLETE"),
        update_resource=utils.get_from_manager(["RESTORE_FAILED"]),
        timeout=CONF.benchmark.heat_stack_restore_timeout,
        check_interval=CONF.benchmark.heat_stack_restore_poll_interval)
def create_servers(self):
    """Create VMs with chosen image."""
    image_uuid = self.get_image_uuid()
    userdata = self.get_userdata()
    flavor = self.config["flavor_id"]
    nics = self.get_nics()

    keypair, public_key_path = self.create_keypair()
    self.create_security_group_and_rules()
    sg_args = {"security_groups": [self.sg.name]} if self.sg else {}

    os_servers = []
    for i in range(self.config.get("amount", 1)):
        name = "%s-%d" % (self.config["deployment_name"], i)
        server = self.nova.servers.create(
            name, image_uuid, flavor,
            nics=nics,
            key_name=keypair.name,
            userdata=userdata,
            config_drive=self.config.get("config_drive", False),
            **sg_args)
        os_servers.append(server)
        self.resources.create({"id": server.id}, type=SERVER_TYPE)

    wait_kwargs = {
        "ready_statuses": ["ACTIVE"],
        "update_resource": utils.get_from_manager(),
        "timeout": 120,
        "check_interval": 5,
    }

    servers = []
    for os_server in os_servers:
        os_server = utils.wait_for(os_server, **wait_kwargs)
        servers.append(provider.Server(host=_get_address(os_server),
                                       user="******",
                                       key=public_key_path))

    # Make sure every server is reachable over ssh.
    for s in servers:
        s.ssh.wait(timeout=120, interval=5)

    if self.config.get("wait_for_cloud_init", False):
        for s in servers:
            utils.wait_for(s, is_ready=_cloud_init_success)

    return servers
def _unshelve_server(self, server):
    """Unshelve the given server.

    Returns when the server is unshelved and is in the "ACTIVE" state.

    :param server: Server object
    """
    server.unshelve()
    # Give nova a moment before we start polling for the status change.
    time.sleep(CONF.benchmark.nova_server_unshelve_prepoll_delay)
    utils.wait_for(
        server,
        is_ready=utils.resource_is("ACTIVE"),
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_unshelve_timeout,
        check_interval=CONF.benchmark.nova_server_unshelve_poll_interval)
def _detach_volume(self, server, volume, attachment=None):
    """Detach a volume from a server and wait until it is "available".

    :param server: server the volume is attached to
    :param volume: volume to detach
    :param attachment: attachment object to detach by (optional)
    """
    # NOTE(chenhb): Recommend the use of attachment.The use of
    # volume.id is retained mainly for backwoard compatible.
    if attachment:
        attachment_id = attachment.id
    else:
        attachment_id = volume.id
    self.clients("nova").volumes.delete_server_volume(server.id,
                                                      attachment_id)
    utils.wait_for(
        volume,
        ready_statuses=["available"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_detach_volume_timeout,
        check_interval=CONF.benchmark.nova_detach_volume_poll_interval)