def _associate_floating_ip(self, server, address, fixed_address=None,
                           atomic_action=True):
    """Add floating IP to an instance

    :param server: The :class:`Server` to add an IP to.
    :param address: The ip address or FloatingIP to add to the instance
    :param fixed_address: The fixedIP address the FloatingIP is to be
                          associated with (optional)
    :param atomic_action: True if this is an atomic action (optional)
    """
    def _add_and_wait():
        # Attach the floating IP, then poll until the address shows up
        # on the server.
        server.add_floating_ip(address, fixed_address=fixed_address)
        utils.wait_for(
            server,
            is_ready=self.check_ip_address(address),
            update_resource=utils.get_from_manager()
        )

    # The same work runs either way; atomic_action only decides whether
    # it is wrapped in an ActionTimer.
    if atomic_action:
        with atomic.ActionTimer(self, "nova.associate_floating_ip"):
            _add_and_wait()
    else:
        _add_and_wait()
    # Update server data
    server.addresses = server.manager.get(server.id).addresses
def _create_node(self, driver, properties, **kwargs):
    """Create an Ironic node and wait until it becomes AVAILABLE.

    :param driver: The name of the driver used to manage this Node.
    :param properties: Key/value pair describing the physical
                       characteristics of the node.
    :param kwargs: optional parameters to create image
    :returns: node object
    """
    kwargs["name"] = self.generate_random_name()
    ironic = self.admin_clients("ironic")
    node = ironic.node.create(driver=driver, properties=properties,
                              **kwargs)
    # Let the node settle briefly before the first status poll.
    self.sleep_between(CONF.openstack.ironic_node_create_poll_interval)
    return utils.wait_for_status(
        node,
        ready_statuses=["AVAILABLE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.openstack.ironic_node_create_timeout,
        check_interval=CONF.openstack.ironic_node_poll_interval,
        id_attr="uuid",
        status_attr="provision_state"
    )
def _create_volume(self, size, **kwargs):
    """Create one volume.

    Returns when the volume is actually created and is in the
    "Available" state.

    :param size: int be size of volume in GB, or dictionary, must
                 contain two values: min - minimum size volumes will be
                 created as; max - maximum size volumes will be created
                 as.
    :param kwargs: Other optional parameters to initialize the volume
    :returns: Created volume object
    """
    if isinstance(size, dict):
        # Pick a random size within the requested [min, max] range.
        size = random.randint(size["min"], size["max"])
    wrapped = cinder_wrapper.wrap(self._clients.cinder, self)
    volume = wrapped.create_volume(size, **kwargs)
    # NOTE(msdubov): It is reasonable to wait 5 secs before starting to
    #                check whether the volume is ready => less API calls.
    self.sleep_between(CONF.openstack.cinder_volume_create_prepoll_delay)
    return bench_utils.wait_for_status(
        volume,
        ready_statuses=["available"],
        update_resource=bench_utils.get_from_manager(),
        timeout=CONF.openstack.cinder_volume_create_timeout,
        check_interval=CONF.openstack.cinder_volume_create_poll_interval
    )
def _migrate(self, server, skip_host_check=False):
    """Cold-migrate the given server and wait for VERIFY_RESIZE.

    :param server: Server object
    :param skip_host_check: Specifies whether to verify the targeted
                            host availability
    :raises MigrateException: if the instance is still on its original
                              host after migration completes
    """
    # The OS-EXT-SRV-ATTR:host attribute requires an admin lookup.
    admin_server = self.admin_clients("nova").servers.get(server.id)
    source_host = getattr(admin_server, "OS-EXT-SRV-ATTR:host")
    admin_server.migrate()
    utils.wait_for(
        server,
        ready_statuses=["VERIFY_RESIZE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_migrate_timeout,
        check_interval=CONF.benchmark.nova_server_migrate_poll_interval
    )
    if skip_host_check:
        return
    refreshed = self.admin_clients("nova").servers.get(server.id)
    target_host = getattr(refreshed, "OS-EXT-SRV-ATTR:host")
    if source_host == target_host:
        raise exceptions.MigrateException(
            "Migration complete but instance did not change host: %s"
            % source_host)
def _live_migrate(self, server, target_host, block_migration=False,
                  disk_over_commit=False, skip_host_check=False):
    """Run live migration of the given server.

    :param server: Server object
    :param target_host: Specifies the target compute node to migrate
    :param block_migration: Specifies the migration type
    :param disk_over_commit: Specifies whether to overcommit migrated
                             instance or not
    :param skip_host_check: Specifies whether to verify the targeted
                            host availability
    :raises LiveMigrateException: if the instance did not change host
                                  (unless skip_host_check is set)
    """
    # Admin lookup: the OS-EXT-SRV-ATTR:host extended attribute is only
    # visible to administrators.
    server_admin = self.admin_clients("nova").servers.get(server.id)
    host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host")
    server_admin.live_migrate(target_host,
                              block_migration=block_migration,
                              disk_over_commit=disk_over_commit)
    # Live migration keeps the instance running, so the success state
    # to wait for is ACTIVE (not VERIFY_RESIZE as in cold migration).
    utils.wait_for(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_live_migrate_timeout,
        check_interval=(
            CONF.benchmark.nova_server_live_migrate_poll_interval)
    )
    # Re-fetch and verify the instance actually moved off its original
    # host, unless the caller opted out of this check.
    server_admin = self.admin_clients("nova").servers.get(server.id)
    if (host_pre_migrate == getattr(server_admin, "OS-EXT-SRV-ATTR:host")
            and not skip_host_check):
        raise exceptions.LiveMigrateException(
            "Migration complete but instance did not change host: %s"
            % host_pre_migrate)
def _create_cluster(self, cluster_template, node_count, **kwargs):
    """Create a Magnum cluster and wait for CREATE_COMPLETE.

    :param cluster_template: cluster_template for the cluster
    :param node_count: the cluster node count
    :param kwargs: optional additional arguments for cluster creation
    :returns: magnum cluster
    """
    magnum = self.clients("magnum")
    cluster = magnum.clusters.create(
        name=self.generate_random_name(),
        cluster_template_id=cluster_template,
        node_count=node_count,
        **kwargs)
    # Pre-poll delay: creation never finishes instantly, so skip the
    # first few useless status checks.
    common_utils.interruptable_sleep(
        CONF.openstack.magnum_cluster_create_prepoll_delay)
    return utils.wait_for_status(
        cluster,
        ready_statuses=["CREATE_COMPLETE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.openstack.magnum_cluster_create_timeout,
        check_interval=CONF.openstack.magnum_cluster_create_poll_interval,
        id_attr="uuid"
    )
def _create_volume(self, size, **kwargs):
    """Create one volume.

    Returns when the volume is actually created and is in the
    "Available" state.

    :param size: int be size of volume in GB, or dictionary, must
                 contain two values: min - minimum size volumes will be
                 created as; max - maximum size volumes will be created
                 as.
    :param kwargs: Other optional parameters to initialize the volume
    :returns: Created volume object
    """
    kwargs.setdefault("display_name", self._generate_random_name())
    if isinstance(size, dict):
        # Choose a random size inside the requested range.
        size = random.randint(size["min"], size["max"])
    volume = self.clients("cinder").volumes.create(size, **kwargs)
    # NOTE(msdubov): It is reasonable to wait 5 secs before starting to
    #                check whether the volume is ready => less API calls.
    time.sleep(CONF.benchmark.cinder_volume_create_prepoll_delay)
    return bench_utils.wait_for(
        volume,
        is_ready=bench_utils.resource_is("available"),
        update_resource=bench_utils.get_from_manager(),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval
    )
def create_image(self, container_format, image_location, disk_format,
                 **kwargs):
    """Create a Glance image from a local file or remote location.

    :param container_format: container format of the image
    :param image_location: local path or URL of the image data
    :param disk_format: disk format of the image
    :param kwargs: optional additional image attributes; a random name
                   is generated when "name" is not supplied
    :returns: created image object in "active" status
    """
    kw = {
        "container_format": container_format,
        "disk_format": disk_format,
    }
    kw.update(kwargs)
    if "name" not in kw:
        kw["name"] = self.owner.generate_random_name()
    image_location = os.path.expanduser(image_location)

    try:
        if os.path.isfile(image_location):
            # Image payloads are binary; text mode would corrupt the
            # upload (or fail to decode) on Python 3.
            kw["data"] = open(image_location, "rb")
        else:
            kw["copy_from"] = image_location

        image = self.client.images.create(**kw)

        time.sleep(CONF.benchmark.glance_image_create_prepoll_delay)

        image = utils.wait_for_status(
            image, ["active"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.benchmark.glance_image_create_timeout,
            check_interval=CONF.benchmark.
            glance_image_create_poll_interval)
    finally:
        # Always close the file handle, even when create/wait fails.
        if "data" in kw:
            kw["data"].close()

    return image
def _create_snapshot(self, volume_id, force=False, **kwargs):
    """Create one snapshot.

    Returns when the snapshot is actually created and is in the
    "Available" state.

    :param volume_id: volume uuid for creating snapshot
    :param force: flag to indicate whether to snapshot a volume even if
                  it's attached to an instance
    :param kwargs: Other optional parameters to initialize the volume
    :returns: Created snapshot object
    """
    kwargs["force"] = force
    wrapped = cinder_wrapper.wrap(self._clients.cinder, self)
    snapshot = wrapped.create_snapshot(volume_id, **kwargs)
    # Brief pre-poll delay avoids a burst of pointless API calls.
    self.sleep_between(CONF.openstack.cinder_volume_create_prepoll_delay)
    return bench_utils.wait_for_status(
        snapshot,
        ready_statuses=["available"],
        update_resource=bench_utils.get_from_manager(),
        timeout=CONF.openstack.cinder_volume_create_timeout,
        check_interval=CONF.openstack.cinder_volume_create_poll_interval
    )
def _create_share(self, share_proto, size=1, **kwargs):
    """Create a share and wait until it is "available".

    :param share_proto: share protocol for new share, available values
                        are NFS, CIFS, GlusterFS and HDFS.
    :param size: size of a share in GB
    :param snapshot_id: ID of the snapshot
    :param name: name of new share
    :param description: description of a share
    :param metadata: optional metadata to set on share creation
    :param share_network: either instance of ShareNetwork or str with ID
    :param share_type: either instance of ShareType or str with ID
    :param is_public: defines whether to set share as public or not.
    :returns: instance of :class:`Share`
    """
    if not kwargs.get("name"):
        kwargs["name"] = self._generate_random_name()

    manila = self.clients("manila")
    share = manila.shares.create(share_proto, size, **kwargs)

    time.sleep(CONF.benchmark.manila_share_create_prepoll_delay)
    return utils.wait_for(
        share,
        is_ready=utils.resource_is("available"),
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.manila_share_create_timeout,
        check_interval=CONF.benchmark.manila_share_create_poll_interval,
    )
def _create_bay(self, baymodel, node_count, **kwargs):
    """Create a Magnum bay and wait for CREATE_COMPLETE.

    :param baymodel: baymodel for the bay
    :param node_count: the bay node count
    :param kwargs: optional additional arguments for bay creation
    :returns: magnum bay
    """
    magnum = self.clients("magnum")
    bay = magnum.bays.create(name=self.generate_random_name(),
                             baymodel_id=baymodel,
                             node_count=node_count,
                             **kwargs)
    # Skip the first few status polls: creation takes a while anyway.
    common_utils.interruptable_sleep(
        CONF.benchmark.magnum_bay_create_prepoll_delay)
    return utils.wait_for_status(
        bay,
        ready_statuses=["CREATE_COMPLETE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.magnum_bay_create_timeout,
        check_interval=CONF.benchmark.magnum_bay_create_poll_interval,
        id_attr="uuid"
    )
def _update_stack(self, stack, template, parameters=None,
                  files=None, environment=None):
    """Update an existing stack

    :param stack: stack that need to be updated
    :param template: Updated template
    :param parameters: template parameters for stack update
    :param files: additional files used in template
    :param environment: stack environment definition
    :returns: object of updated stack
    """
    update_args = {
        "stack_name": stack.stack_name,
        "disable_rollback": True,
        "parameters": parameters or {},
        "template": template,
        "files": files or {},
        "environment": environment or {}
    }
    self.clients("heat").stacks.update(stack.id, **update_args)

    time.sleep(CONF.benchmark.heat_stack_update_prepoll_delay)
    return utils.wait_for(
        stack,
        ready_statuses=["UPDATE_COMPLETE"],
        update_resource=utils.get_from_manager(["UPDATE_FAILED"]),
        timeout=CONF.benchmark.heat_stack_update_timeout,
        check_interval=CONF.benchmark.heat_stack_update_poll_interval)
def _scale_stack(self, stack, output_key, delta):
    """Scale a stack up or down.

    Calls the webhook given in the output value identified by
    'output_key', and waits for the stack size to change by 'delta'.

    :param stack: stack to scale up or down
    :param output_key: The name of the output to get the URL from
    :param delta: The expected change in number of instances in the
                  stack (signed int)
    """
    num_instances = self._count_instances(stack)
    expected_instances = num_instances + delta
    # Pass args lazily so the message is only rendered when DEBUG
    # logging is actually enabled.
    LOG.debug("Scaling stack %s from %s to %s instances with %s",
              stack.id, num_instances, expected_instances, output_key)
    with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key):
        self._stack_webhook(stack, output_key)
        utils.wait_for(
            stack,
            is_ready=lambda s: (
                self._count_instances(s) == expected_instances),
            update_resource=utils.get_from_manager(
                ["UPDATE_FAILED"]),
            timeout=CONF.benchmark.heat_stack_scale_timeout,
            check_interval=CONF.benchmark.heat_stack_scale_poll_interval)
def test_get_from_manager_in_error_state(self):
    """A cached resource in ERROR state raises GetResourceFailure."""
    updater = utils.get_from_manager()
    fake_manager = fakes.FakeManager()
    fake_resource = fakes.FakeResource(manager=fake_manager,
                                       status="ERROR")
    fake_manager._cache(fake_resource)
    self.assertRaises(exceptions.GetResourceFailure,
                      updater, fake_resource)
def test_get_from_manager_in_deleted_state(self):
    """A cached resource in DELETED state raises GetResourceNotFound."""
    updater = utils.get_from_manager()
    fake_manager = fakes.FakeManager()
    fake_resource = fakes.FakeResource(manager=fake_manager,
                                       status="DELETED")
    fake_manager._cache(fake_resource)
    self.assertRaises(exceptions.GetResourceNotFound,
                      updater, fake_resource)
def _create_snapshot(self, volume_id, force=False, **kwargs):
    """Create one snapshot.

    Returns when the snapshot is actually created and is in the
    "Available" state.

    :param volume_id: volume uuid for creating snapshot
    :param force: flag to indicate whether to snapshot a volume even if
                  it's attached to an instance
    :param kwargs: Other optional parameters to initialize the volume
    :returns: Created snapshot object
    """
    kwargs.setdefault("display_name", self._generate_random_name())
    kwargs["force"] = force

    cinder = self.clients("cinder")
    snapshot = cinder.volume_snapshots.create(volume_id, **kwargs)

    # Brief pre-poll delay => fewer useless API calls.
    time.sleep(CONF.benchmark.cinder_volume_create_prepoll_delay)
    return bench_utils.wait_for(
        snapshot,
        is_ready=bench_utils.resource_is("available"),
        update_resource=bench_utils.get_from_manager(),
        timeout=CONF.benchmark.cinder_volume_create_timeout,
        check_interval=CONF.benchmark.cinder_volume_create_poll_interval
    )
def delete_image(self, image):
    """Delete the given image and wait until deletion is confirmed."""
    image.delete()
    utils.wait_for_status(
        image, ["deleted"],
        check_deletion=True,
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.glance_image_delete_timeout,
        check_interval=CONF.benchmark.glance_image_delete_poll_interval)
def _do_server_reboot(self, server, reboottype):
    """Reboot the server and wait for it to return to ACTIVE.

    :param server: server to reboot
    :param reboottype: reboot type passed through to nova
    """
    server.reboot(reboot_type=reboottype)
    time.sleep(CONF.benchmark.nova_server_reboot_prepoll_delay)
    active_check = utils.resource_is("ACTIVE")
    utils.wait_for(
        server,
        is_ready=active_check,
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_reboot_timeout,
        check_interval=CONF.benchmark.nova_server_reboot_poll_interval
    )
def _wait(self, ready_statuses, failure_statuses):
    """Poll self.stack until a ready or failure status is reached."""
    self.stack = utils.wait_for_status(
        self.stack,
        ready_statuses=ready_statuses,
        failure_statuses=failure_statuses,
        update_resource=utils.get_from_manager(),
        check_interval=10,
        timeout=1200,
    )
def _resize(self, server, flavor):
    """Resize the server to *flavor* and wait for VERIFY_RESIZE."""
    server.resize(flavor)
    opts = CONF.benchmark
    utils.wait_for(
        server,
        ready_statuses=["VERIFY_RESIZE"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_resize_timeout,
        check_interval=opts.nova_server_resize_poll_interval
    )
def _wait(self, ready_statuses, failure_statuses):
    """Poll self.stack until a ready or failure status is reached."""
    opts = CONF.openstack
    self.stack = utils.wait_for_status(
        self.stack,
        ready_statuses=ready_statuses,
        failure_statuses=failure_statuses,
        update_resource=utils.get_from_manager(),
        check_interval=opts.heat_stack_create_poll_interval,
        timeout=opts.heat_stack_create_timeout,
    )
def _resize_confirm(self, server, status="ACTIVE"):
    """Confirm a resize and wait for the server to reach *status*."""
    server.confirm_resize()
    opts = CONF.openstack
    utils.wait_for_status(
        server,
        ready_statuses=[status],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_resize_confirm_timeout,
        check_interval=opts.nova_server_resize_confirm_poll_interval
    )
def _resize_confirm(self, server, status="ACTIVE"):
    """Confirm a resize and wait for the server to reach *status*."""
    server.confirm_resize()
    opts = CONF.benchmark
    utils.wait_for(
        server,
        is_ready=utils.resource_is(status),
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_resize_confirm_timeout,
        check_interval=opts.nova_server_resize_confirm_poll_interval
    )
def _do_server_reboot(self, server, reboottype):
    """Reboot the server and wait for it to become ACTIVE again.

    :param server: server object to reboot
    :param reboottype: reboot type passed through to nova
    """
    server.reboot(reboot_type=reboottype)
    # NOTE(review): this reuses the *pause* prepoll-delay option rather
    # than a reboot-specific one (cf. the sibling reboot helper that
    # uses nova_server_reboot_prepoll_delay) — looks copy-pasted;
    # confirm whether a dedicated option exists.
    self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay)
    utils.wait_for(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_reboot_timeout,
        check_interval=CONF.benchmark.nova_server_reboot_poll_interval
    )
def _resize_revert(self, server, status="ACTIVE"):
    """Revert a resize and wait for the server to reach *status*."""
    server.revert_resize()
    opts = CONF.benchmark
    utils.wait_for(
        server,
        ready_statuses=[status],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_resize_revert_timeout,
        check_interval=opts.nova_server_resize_revert_poll_interval
    )
def test_get_from_manager_http_exception(self):
    """An arbitrary exception from manager.get maps to GetResourceFailure."""
    class HTTPException(Exception):
        pass

    updater = utils.get_from_manager()
    fake_manager = mock.MagicMock()
    fake_resource = fakes.FakeResource(manager=fake_manager,
                                       status="ERROR")
    fake_manager.get = mock.MagicMock(side_effect=HTTPException)
    self.assertRaises(exceptions.GetResourceFailure,
                      updater, fake_resource)
def test_get_from_manager_not_found(self):
    """A 404-status exception from manager.get maps to GetResourceNotFound."""
    class NotFoundException(Exception):
        http_status = 404

    updater = utils.get_from_manager()
    fake_manager = mock.MagicMock()
    fake_resource = fakes.FakeResource(manager=fake_manager,
                                       status="ERROR")
    fake_manager.get = mock.MagicMock(side_effect=NotFoundException)
    self.assertRaises(exceptions.GetResourceNotFound,
                      updater, fake_resource)
def _delete_security_service(self, security_service):
    """Delete security service.

    :param security_service: instance of :class:`SecurityService`.
    """
    security_service.delete()
    opts = CONF.benchmark
    utils.wait_for_delete(
        security_service,
        update_resource=utils.get_from_manager(),
        timeout=opts.manila_share_delete_timeout,
        check_interval=opts.manila_share_delete_poll_interval)
def _delete_share_network(self, share_network):
    """Delete share network.

    :param share_network: instance of :class:`ShareNetwork`.
    """
    share_network.delete()
    opts = CONF.benchmark
    utils.wait_for_delete(
        share_network,
        update_resource=utils.get_from_manager(),
        timeout=opts.manila_share_delete_timeout,
        check_interval=opts.manila_share_delete_poll_interval)
def _detach_volume(self, server, volume):
    """Detach the volume from the server and wait for "available"."""
    self.clients("nova").volumes.delete_server_volume(server.id,
                                                      volume.id)
    utils.wait_for(
        volume,
        is_ready=utils.resource_is("available"),
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_detach_volume_timeout,
        check_interval=CONF.benchmark.nova_detach_volume_poll_interval
    )
def _delete_snapshot(self, snapshot):
    """Delete the given snapshot.

    Returns when the snapshot is actually deleted.

    :param snapshot: snapshot object
    """
    snapshot.delete()
    opts = CONF.openstack
    bench_utils.wait_for_status(
        snapshot,
        ready_statuses=["deleted"],
        check_deletion=True,
        update_resource=bench_utils.get_from_manager(),
        timeout=opts.cinder_volume_delete_timeout,
        check_interval=opts.cinder_volume_delete_poll_interval)
def _delete_backup(self, backup):
    """Delete the given backup.

    Returns when the backup is actually deleted.

    :param backup: backup instance
    """
    backup.delete()
    opts = CONF.openstack
    bench_utils.wait_for_status(
        backup,
        ready_statuses=["deleted"],
        check_deletion=True,
        update_resource=bench_utils.get_from_manager(),
        timeout=opts.cinder_volume_delete_timeout,
        check_interval=opts.cinder_volume_delete_poll_interval)
def _unshelve_server(self, server):
    """Unshelve the given server.

    Returns when the server is unshelved and is in the "ACTIVE" state.

    :param server: Server object
    """
    server.unshelve()
    time.sleep(CONF.benchmark.nova_server_unshelve_prepoll_delay)
    opts = CONF.benchmark
    utils.wait_for(
        server,
        is_ready=utils.resource_is("ACTIVE"),
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_unshelve_timeout,
        check_interval=opts.nova_server_unshelve_poll_interval)
def _rebuild_server(self, server, image, **kwargs):
    """Rebuild a server with a new image.

    :param server: The server to rebuild.
    :param image: The new image to rebuild the server with.
    :param kwargs: Optional additional arguments to pass to the rebuild
    """
    server.rebuild(image, **kwargs)
    self.sleep_between(CONF.openstack.nova_server_rebuild_prepoll_delay)
    opts = CONF.openstack
    utils.wait_for_status(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_rebuild_timeout,
        check_interval=opts.nova_server_rebuild_poll_interval)
def _unrescue_server(self, server):
    """Unrescue the given server.

    Returns when the server is unrescue and waits to become ACTIVE

    :param server: Server object
    """
    server.unrescue()
    time.sleep(CONF.benchmark.nova_server_unrescue_prepoll_delay)
    opts = CONF.benchmark
    utils.wait_for(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_unrescue_timeout,
        check_interval=opts.nova_server_unrescue_poll_interval)
def _delete_image(self, image):
    """Deletes given image.

    Returns when the image is actually deleted.

    :param image: Image object
    """
    image.delete()
    opts = CONF.benchmark
    utils.wait_for_status(
        image,
        ready_statuses=["deleted"],
        check_deletion=True,
        update_resource=utils.get_from_manager(),
        timeout=opts.glance_image_delete_timeout,
        check_interval=opts.glance_image_delete_poll_interval)
def _rebuild_server(self, server, image, **kwargs):
    """Rebuild a server with a new image.

    :param server: The server to rebuild.
    :param image: The new image to rebuild the server with.
    :param kwargs: Optional additional arguments to pass to the rebuild
    """
    server.rebuild(image, **kwargs)
    time.sleep(CONF.benchmark.nova_server_rebuild_prepoll_delay)
    opts = CONF.benchmark
    utils.wait_for(
        server,
        is_ready=utils.resource_is("ACTIVE"),
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_rebuild_timeout,
        check_interval=opts.nova_server_rebuild_poll_interval)
def _start_server(self, server):
    """Start the given server.

    A start will be issued for the given server upon which time
    this method will wait for it to become ACTIVE.

    :param server: The server to start and wait to become ACTIVE.
    """
    server.start()
    opts = CONF.benchmark
    utils.wait_for(
        server,
        is_ready=utils.resource_is("ACTIVE"),
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_start_timeout,
        check_interval=opts.nova_server_start_poll_interval)
def _restore_stack(self, stack, snapshot_id):
    """Restores stack from given snapshot.

    :param stack: stack that will be restored from snapshot
    :param snapshot_id: id of given snapshot
    """
    self.clients("heat").stacks.restore(stack.id, snapshot_id)
    opts = CONF.benchmark
    utils.wait_for(
        stack,
        is_ready=utils.resource_is("RESTORE_COMPLETE"),
        update_resource=utils.get_from_manager(
            ["RESTORE_FAILED"]),
        timeout=opts.heat_stack_restore_timeout,
        check_interval=opts.heat_stack_restore_poll_interval
    )
def _stop_server(self, server):
    """Stop the given server.

    Issues a stop on the given server and waits for the server
    to become SHUTOFF.

    :param server: The server to stop.
    """
    server.stop()
    opts = CONF.benchmark
    utils.wait_for(
        server,
        is_ready=utils.resource_is("SHUTOFF"),
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_stop_timeout,
        check_interval=opts.nova_server_stop_poll_interval)
def _delete_volume(self, volume):
    """Delete the given volume.

    Returns when the volume is actually deleted.

    :param volume: volume object
    """
    volume.delete()
    opts = CONF.openstack
    bench_utils.wait_for_status(
        volume,
        ready_statuses=["deleted"],
        check_deletion=True,
        update_resource=bench_utils.get_from_manager(),
        timeout=opts.cinder_volume_delete_timeout,
        check_interval=opts.cinder_volume_delete_poll_interval)
def _create_audit(self, audit_template_uuid):
    """Launch a ONESHOT Watcher audit and wait until it SUCCEEDED.

    :param audit_template_uuid: UUID of the audit template to run
    :returns: created audit object
    """
    watcher = self.admin_clients("watcher")
    audit = watcher.audit.create(
        audit_template_uuid=audit_template_uuid,
        audit_type="ONESHOT")
    opts = CONF.openstack
    utils.wait_for_status(
        audit,
        ready_statuses=["SUCCEEDED"],
        failure_statuses=["FAILED"],
        status_attr="state",
        update_resource=utils.get_from_manager(),
        timeout=opts.watcher_audit_launch_timeout,
        check_interval=opts.watcher_audit_launch_poll_interval,
        id_attr="uuid"
    )
    return audit
def _deploy_environment(self, environment, session):
    """Deploy environment.

    :param environment: Environment instance
    :param session: Session instance
    """
    self.clients("murano").sessions.deploy(environment.id, session.id)
    opts = CONF.benchmark
    utils.wait_for(
        environment,
        ready_statuses=["READY"],
        update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
        timeout=opts.murano_deploy_environment_timeout,
        check_interval=opts.murano_deploy_environment_check_interval)
def _restore_stack(self, stack, snapshot_id):
    """Restores stack from given snapshot.

    :param stack: stack that will be restored from snapshot
    :param snapshot_id: id of given snapshot
    """
    self.clients("heat").stacks.restore(stack.id, snapshot_id)
    opts = CONF.openstack
    utils.wait_for_status(
        stack,
        ready_statuses=["RESTORE_COMPLETE"],
        failure_statuses=["RESTORE_FAILED", "ERROR"],
        update_resource=utils.get_from_manager(),
        timeout=opts.heat_stack_restore_timeout,
        check_interval=opts.heat_stack_restore_poll_interval
    )
def _stop_server(self, server):
    """Stop the given server.

    Issues a stop on the given server and waits for the server
    to become SHUTOFF.

    :param server: The server to stop.
    """
    server.stop()
    opts = CONF.openstack
    utils.wait_for_status(
        server,
        ready_statuses=["SHUTOFF"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_stop_timeout,
        check_interval=opts.nova_server_stop_poll_interval)
def _check_stack(self, stack):
    """Check given stack.

    Check the stack and stack resources.

    :param stack: stack that needs to be checked
    """
    self.clients("heat").actions.check(stack.id)
    opts = CONF.openstack
    utils.wait_for_status(
        stack,
        ready_statuses=["CHECK_COMPLETE"],
        failure_statuses=["CHECK_FAILED", "ERROR"],
        update_resource=utils.get_from_manager(["CHECK_FAILED"]),
        timeout=opts.heat_stack_check_timeout,
        check_interval=opts.heat_stack_check_poll_interval)
def _snapshot_stack(self, stack):
    """Creates a snapshot for given stack.

    :param stack: stack that will be used as base for snapshot
    :returns: snapshot created for given stack
    """
    snapshot = self.clients("heat").stacks.snapshot(stack.id)
    opts = CONF.openstack
    utils.wait_for_status(
        stack,
        ready_statuses=["SNAPSHOT_COMPLETE"],
        failure_statuses=["SNAPSHOT_FAILED", "ERROR"],
        update_resource=utils.get_from_manager(),
        timeout=opts.heat_stack_snapshot_timeout,
        check_interval=opts.heat_stack_snapshot_poll_interval)
    return snapshot
def _delete_stack(self, stack):
    """Delete given stack.

    Returns when the stack is actually deleted.

    :param stack: stack object
    """
    stack.delete()
    opts = CONF.benchmark
    utils.wait_for_status(
        stack,
        ready_statuses=["deleted"],
        check_deletion=True,
        update_resource=utils.get_from_manager(),
        timeout=opts.heat_stack_delete_timeout,
        check_interval=opts.heat_stack_delete_poll_interval)
def create_servers(self):
    """Create VMs with chosen image.

    Boots self.config["amount"] (default 1) servers, registers them as
    deployment resources, waits for ACTIVE status and SSH reachability,
    and returns the provider Server wrappers.

    :returns: list of provider.Server objects
    """
    # Gather boot parameters from the deployment config/helpers.
    image_uuid = self.get_image_uuid()
    userdata = self.get_userdata()
    flavor = self.config["flavor_id"]
    nics = self.get_nics()
    keypair, public_key_path = self.create_keypair()
    self.create_security_group_and_rules()
    # Only pass security_groups if a group was actually created.
    sg_args = {"security_groups": [self.sg.name]} if self.sg else {}
    os_servers = []
    for i in range(self.config.get("amount", 1)):
        name = "%s-%d" % (self.config["deployment_name"], i)
        server = self.nova.servers.create(
            name, image_uuid, flavor,
            nics=nics,
            key_name=keypair.name,
            userdata=userdata,
            config_drive=self.config.get("config_drive", False),
            **sg_args)
        os_servers.append(server)
        # Track each server so the deployment can clean it up later.
        self.resources.create({"id": server.id}, type=SERVER_TYPE)
    kwargs = {
        "ready_statuses": ["ACTIVE"],
        "update_resource": utils.get_from_manager(),
        "timeout": 120,
        "check_interval": 5
    }
    servers = []
    for os_server in os_servers:
        os_server = utils.wait_for(os_server, **kwargs)
        # NOTE(review): the SSH user appears redacted/placeholder here
        # ("******") — verify the intended login user for these images.
        server = provider.Server(host=_get_address(os_server),
                                 user="******",
                                 key=public_key_path)
        servers.append(server)
    for s in servers:
        s.ssh.wait(timeout=120, interval=5)
    # Optionally block until cloud-init reports success on each host.
    if self.config.get("wait_for_cloud_init", False):
        for s in servers:
            utils.wait_for(s, is_ready=_cloud_init_success)
    return servers
def _resume_server(self, server):
    """Resumes the suspended server.

    Returns when the server is actually resumed and is in the "ACTIVE"
    state.

    :param server: Server object
    """
    server.resume()
    self.sleep_between(CONF.openstack.nova_server_resume_prepoll_delay)
    opts = CONF.openstack
    utils.wait_for_status(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_resume_timeout,
        check_interval=opts.nova_server_resume_poll_interval)
def _delete_node(self, node):
    """Delete the node with specific id.

    :param node: Ironic node object
    """
    self.admin_clients("ironic").node.delete(node.uuid)
    opts = CONF.openstack
    utils.wait_for_status(
        node,
        ready_statuses=["deleted"],
        check_deletion=True,
        update_resource=utils.get_from_manager(),
        timeout=opts.ironic_node_delete_timeout,
        check_interval=opts.ironic_node_poll_interval,
        id_attr="uuid",
        status_attr="provision_state")
def _unshelve_server(self, server):
    """Unshelve the given server.

    Returns when the server is unshelved and is in the "ACTIVE" state.

    :param server: Server object
    """
    server.unshelve()
    self.sleep_between(CONF.openstack.nova_server_unshelve_prepoll_delay)
    opts = CONF.openstack
    utils.wait_for_status(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_unshelve_timeout,
        check_interval=opts.nova_server_unshelve_poll_interval)
def _pause_server(self, server):
    """Pause the live server.

    Returns when the server is actually paused and is in the "PAUSED"
    state.

    :param server: Server object
    """
    server.pause()
    self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)
    opts = CONF.openstack
    utils.wait_for_status(
        server,
        ready_statuses=["PAUSED"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_pause_timeout,
        check_interval=opts.nova_server_pause_poll_interval)
def _delete_environment(self, environment):
    """Delete given environment.

    Return when the environment is actually deleted.

    :param environment: Environment instance
    """
    self.clients("murano").environments.delete(environment.id)
    opts = CONF.benchmark
    utils.wait_for_delete(
        environment,
        update_resource=utils.get_from_manager(),
        timeout=opts.murano_delete_environment_timeout,
        check_interval=opts.murano_delete_environment_check_interval
    )
def _set_alarm_state(self, alarm, state, timeout):
    """Set the state of the alarm.

    :param alarm: alarm instance
    :param state: an alarm state to be set
    :param timeout: The number of seconds for which to attempt a
                    successful check of the alarm state.
    :returns: alarm in the set state
    """
    self.clients("ceilometer").alarms.set_state(alarm.alarm_id, state)
    return bench_utils.wait_for(
        alarm,
        ready_statuses=[state],
        update_resource=bench_utils.get_from_manager(),
        check_interval=1,
        timeout=timeout)
def _shelve_server(self, server):
    """Shelve the given server.

    Returns when the server is actually shelved and is in the
    "SHELVED_OFFLOADED" state.

    :param server: Server object
    """
    server.shelve()
    # NOTE(review): reuses the *pause* prepoll-delay option instead of a
    # shelve-specific one — likely copy-pasted; confirm whether a
    # dedicated option exists before changing.
    self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay)
    utils.wait_for_status(
        server,
        ready_statuses=["SHELVED_OFFLOADED"],
        update_resource=utils.get_from_manager(),
        timeout=CONF.benchmark.nova_server_shelve_timeout,
        check_interval=CONF.benchmark.nova_server_shelve_poll_interval)
def _unpause_server(self, server):
    """Unpause the paused server.

    Returns when the server is actually unpaused and is in the "ACTIVE"
    state.

    :param server: Server object
    """
    server.unpause()
    self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay)
    opts = CONF.benchmark
    utils.wait_for_status(
        server,
        ready_statuses=["ACTIVE"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_unpause_timeout,
        check_interval=opts.nova_server_unpause_poll_interval)
def _suspend_server(self, server):
    """Suspends the given server.

    Returns when the server is actually suspended and is in the
    "Suspended" state.

    :param server: Server object
    """
    server.suspend()
    self.sleep_between(CONF.benchmark.nova_server_suspend_prepoll_delay)
    opts = CONF.benchmark
    utils.wait_for_status(
        server,
        ready_statuses=["SUSPENDED"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_suspend_timeout,
        check_interval=opts.nova_server_suspend_poll_interval)
def _rescue_server(self, server):
    """Rescue the given server.

    Returns when the server is actually rescue and is in the "Rescue"
    state.

    :param server: Server object
    """
    server.rescue()
    self.sleep_between(CONF.benchmark.nova_server_rescue_prepoll_delay)
    opts = CONF.benchmark
    utils.wait_for_status(
        server,
        ready_statuses=["RESCUE"],
        update_resource=utils.get_from_manager(),
        timeout=opts.nova_server_rescue_timeout,
        check_interval=opts.nova_server_rescue_poll_interval)
def _delete_stack(self, stack):
    """Delete given stack.

    Returns when the stack is actually deleted.

    :param stack: stack object
    """
    stack.delete()
    opts = CONF.openstack
    utils.wait_for_status(
        stack,
        ready_statuses=["DELETE_COMPLETE"],
        failure_statuses=["DELETE_FAILED", "ERROR"],
        check_deletion=True,
        update_resource=utils.get_from_manager(),
        timeout=opts.heat_stack_delete_timeout,
        check_interval=opts.heat_stack_delete_poll_interval)