def _get_job_results(
    self,
    logger: gluetool.log.ContextAdapter,
    job_id: str
) -> Result[bs4.BeautifulSoup, Failure]:
    """
    Run ``bkr job-results`` command and return the parsed job results.

    :param gluetool.log.ContextAdapter logger: logger to use for logging.
    :param str job_id: ID of the Beaker job whose results should be fetched.
    :rtype: result.Result[bs4.BeautifulSoup, Failure]
    :returns: :py:class:`result.Result` with job results parsed as XML, or specification of error.
    """

    r_results = self._run_bkr(logger, ['job-results', job_id], commandname='bkr.job-results')

    if r_results.is_error:
        return Error(r_results.unwrap_error())

    bkr_output = r_results.unwrap()

    # `bkr job-results` prints the job results XML on stdout; parse it with the XML parser.
    try:
        return Ok(bs4.BeautifulSoup(bkr_output.stdout, 'xml'))

    except Exception as exc:
        return Error(Failure.from_exc(
            'failed to parse job results XML',
            exc,
            command_output=bkr_output.process_output
        ))
def refresh_avoid_groups_hostnames(self, logger: ContextAdapter) -> Result[None, Failure]:
    """
    Refresh the cached mapping of "avoid" groups to their member hostnames.

    For every group listed by :py:attr:`avoid_groups`, fetch its hostnames and store the
    whole mapping in the cache under :py:attr:`avoid_groups_hostnames_cache_key`.

    :param logger: logger to use for logging.
    :returns: ``Ok(None)`` on success, or a failure from any of the intermediate steps.
    """

    r_avoid_groups = self.avoid_groups

    if r_avoid_groups.is_error:
        return Error(r_avoid_groups.unwrap_error())

    collected: List[AvoidGroupHostnames] = []

    for groupname in r_avoid_groups.unwrap():
        r_hostnames = self._fetch_avoid_group_hostnames(logger, groupname)

        if r_hostnames.is_error:
            return Error(r_hostnames.unwrap_error())

        collected.append(AvoidGroupHostnames(
            groupname=groupname,
            hostnames=r_hostnames.unwrap()
        ))

    # Replace the cached set atomically with the freshly collected mapping.
    r_refresh = refresh_cached_set(
        CACHE.get(),
        self.avoid_groups_hostnames_cache_key,
        {item.groupname: item for item in collected}
    )

    if r_refresh.is_error:
        return Error(r_refresh.unwrap_error())

    return Ok(None)
def release_pool_resources(
    self,
    logger: gluetool.log.ContextAdapter,
    raw_resource_ids: SerializedPoolResourcesIDs
) -> Result[None, Failure]:
    """
    Release a guest's Azure resources: the VM first, then its leftover assorted resources.

    :param logger: logger to use for logging.
    :param raw_resource_ids: serialized resource IDs recorded when the VM was created.
    :returns: ``Ok(None)`` when every deletion succeeded, or the first deletion failure.
    """

    # NOTE(ivasilev) Azure doesn't delete a VM's resources (disk, secgroup, publicip) upon VM
    # deletion, so they must be removed manually. Lifehack: query for tag uid=name used during
    # VM creation. Delete the VM first, its resources second.
    resource_ids = AzurePoolResourcesIDs.unserialize_from_json(raw_resource_ids)

    def _remove(azure_resource_id: str) -> Any:
        return self._run_cmd_with_auth(
            ['resource', 'delete', '--ids', azure_resource_id],
            json_format=False,
            commandname='az.resource-delete'
        )

    if resource_ids.instance_id is not None:
        r_removal = _remove(resource_ids.instance_id)

        if r_removal.is_error:
            return Error(r_removal.unwrap_error())

        self.inc_costs(logger, ResourceType.VIRTUAL_MACHINE, resource_ids.ctime)

    if resource_ids.assorted_resource_ids is not None:
        for assorted in resource_ids.assorted_resource_ids:
            r_removal = _remove(assorted['id'])

            if r_removal.is_error:
                return Error(r_removal.unwrap_error())

            self.inc_costs(logger, AZURE_RESOURCE_TYPE[assorted['type']], resource_ids.ctime)

    return Ok(None)
def hook_engine(hook_name: str) -> Result[ScriptEngine, Failure]:
    """
    Load the script registered for the given hook, and return an engine ready to run it.

    The script path is read from the ``ARTEMIS_HOOK_<NAME>`` environment variable, and the
    script must define a ``hook_<NAME>`` callable.

    :param hook_name: name of the hook to load.
    :returns: a :py:class:`ScriptEngine` with the hook script loaded, or a failure when the
        hook is not configured, its file does not exist, loading fails, or the expected
        callable is missing.
    """

    script_filepath = os.getenv(f'ARTEMIS_HOOK_{hook_name.upper()}', None)
    hook_callback_name = f'hook_{hook_name.upper()}'

    if not script_filepath:
        return Error(
            Failure('hook filepath not defined', hook_name=hook_name, script_filepath=script_filepath))

    script_filepath = os.path.expanduser(script_filepath)

    if not os.path.exists(script_filepath):
        # Fixed copy-paste bug: this branch used to report 'hook filepath not defined',
        # making a missing file indistinguishable from an unset environment variable.
        return Error(
            Failure('hook script file does not exist', hook_name=hook_name, script_filepath=script_filepath))

    engine = ScriptEngine()

    r_load = engine.load_script_file(script_filepath)

    if r_load.is_error:
        return Error(r_load.unwrap_error())

    if hook_callback_name not in engine.functions:
        return Error(
            Failure(
                'hook callable not found',
                hook_name=hook_name,
                script_filepath=script_filepath,
                callable_name=hook_callback_name))

    return Ok(engine)
def wrapper(logger: gluetool.log.ContextAdapter,
            session: sqlalchemy.orm.session.Session,
            pools: List[PoolDriver],
            guest_request: GuestRequest) -> PolicyReturnType:
    """
    Run the wrapped policy, guarded by its enablement knob and a catch-all for crashes.
    """

    try:
        policy_logger = PolicyLogger(logger, policy_name)

        log_dict(policy_logger.debug, 'input pools', pools)

        r_enabled = knob_enabled.get_value(session=session)

        if r_enabled.is_error:
            return Error(
                Failure.from_failure('failed to test policy enablement', r_enabled.unwrap_error()))

        # A disabled policy behaves as a no-op: all input pools remain allowed.
        if r_enabled.unwrap() is not True:
            policy_logger.debug('policy disabled, skipping')

            return Ok(PolicyRuling(allowed_pools=pools))

        ruling_result = fn(policy_logger, session, pools, guest_request)

        if ruling_result.is_error:
            return ruling_result

        policy_logger.debug(f'ruling: {ruling_result.unwrap()}')

        return ruling_result

    except Exception as exc:
        # A policy must never take down the routing pipeline - convert crashes to failures.
        return Error(
            Failure.from_exc('routing policy crashed', exc, routing_policy=policy_name))
def policy_timeout_reached(logger: gluetool.log.ContextAdapter,
                           session: sqlalchemy.orm.session.Session,
                           pools: List[PoolDriver],
                           guest_request: GuestRequest) -> PolicyReturnType:
    """
    Cancel the guest request if it reached a certain age.

    The threshold is controlled by :py:data:`KNOB_ROUTE_REQUEST_MAX_TIME`.
    """

    r_events = guest_request.fetch_events(session, eventname='created')

    if r_events.is_error:
        return Error(r_events.unwrap_error())

    events = r_events.unwrap()

    # Without a 'created' event there is nothing to measure the age against.
    if not events:
        return Ok(PolicyRuling(allowed_pools=pools))

    r_time = KNOB_ROUTE_REQUEST_MAX_TIME.get_value(session=session)

    if r_time.is_error:
        return Error(r_time.unwrap_error())

    created_at = events[0].updated
    valid_until = created_at + datetime.timedelta(seconds=r_time.unwrap())

    logger.info(f'event created {created_at}, valid until {valid_until}')

    if datetime.datetime.utcnow() > valid_until:
        return Ok(PolicyRuling(cancel=True))

    return Ok(PolicyRuling(allowed_pools=pools))
def _parse_job_status(
    self,
    logger: gluetool.log.ContextAdapter,
    job_results: bs4.BeautifulSoup
) -> Result[Tuple[str, str], Failure]:
    """
    Parse job results and return its result and status.

    :param bs4.BeautifulSoup job_results: job results in XML format.
    :rtype: result.Result[Tuple[str, str], Failure]
    :returns: a tuple with two items, job result and status, or specification of error.
    """

    # Expect exactly one <job/> element in the results.
    if not job_results.find('job') or len(job_results.find_all('job')) != 1:
        return Error(Failure(
            'job results XML has unknown structure',
            job_results=job_results.prettify()
        ))

    job = job_results.find('job')

    # Use Tag.get(): indexing a bs4 tag with a missing attribute raises KeyError,
    # which would bypass these guards instead of producing the structured failure.
    if not job.get('result'):
        return Error(Failure(
            'job results XML does not contain result attribute',
            job_results=job_results.prettify()
        ))

    if not job.get('status'):
        return Error(Failure(
            'job results XML does not contain status attribute',
            job_results=job_results.prettify()
        ))

    return Ok((job['result'].lower(), job['status'].lower()))
def _update_guest_log_console_url(
    self,
    logger: gluetool.log.ContextAdapter,
    guest_request: GuestRequest,
    guest_log: GuestLog) -> Result[GuestLogUpdateProgress, Failure]:
    """
    Refresh the console-URL guest log: fetch the URL and mark the log complete once known.

    :returns: update progress - ``IN_PROGRESS`` while the URL is not available yet,
        ``COMPLETE`` with the URL and an expiry timestamp once it is.
    """

    r_delay_update = KNOB_CONSOLE_BLOB_UPDATE_TICK.get_value(poolname=self.poolname)

    if r_delay_update.is_error:
        return Error(r_delay_update.unwrap_error())

    r_output = self._do_fetch_console(guest_request, 'url')

    if r_output.is_error:
        return Error(r_output.unwrap_error())

    output = r_output.unwrap()

    # No console info yet - try again after the configured delay.
    if output is None:
        return Ok(GuestLogUpdateProgress(
            state=GuestLogState.IN_PROGRESS,
            delay_update=r_delay_update.unwrap()))

    expires_at = datetime.datetime.utcnow() \
        + datetime.timedelta(seconds=KNOB_CONSOLE_URL_EXPIRES.value)

    return Ok(GuestLogUpdateProgress(
        state=GuestLogState.COMPLETE,
        url=cast(Dict[str, str], output)['url'],
        expires=expires_at))
def release_guest(self, logger: gluetool.log.ContextAdapter, guest_request: GuestRequest) -> Result[bool, Failure]:
    """
    Release guest and its resources back to the pool.

    :param Guest guest: a guest to be destroyed.
    :rtype: result.Result[bool, str]
    """

    # Nothing was provisioned yet - nothing to release.
    if OpenStackPoolData.is_empty(guest_request):
        return Ok(True)

    if guest_request.poolname != self.poolname:
        return Error(Failure('guest is not owned by this pool'))

    instance_id = OpenStackPoolData.unserialize(guest_request).instance_id

    r_cleanup = self._dispatch_resource_cleanup(
        logger,
        instance_id=instance_id,
        guest_request=guest_request)

    if r_cleanup.is_error:
        return Error(r_cleanup.unwrap_error())

    return Ok(True)
def create_snapshot(
    self,
    guest_request: GuestRequest,
    snapshot_request: SnapshotRequest
) -> Result[ProvisioningProgress, Failure]:
    """
    Start creating a snapshot image of the guest's OpenStack instance.

    :returns: provisioning progress in ``PENDING`` state - image creation is asynchronous
        and must be polled later.
    """

    r_delay = KNOB_UPDATE_GUEST_REQUEST_TICK.get_value(poolname=self.poolname)

    if r_delay.is_error:
        return Error(r_delay.unwrap_error())

    instance_id = OpenStackPoolData.unserialize(guest_request).instance_id

    r_output = self._run_os(
        [
            'server', 'image', 'create',
            '--name', snapshot_request.snapshotname,
            instance_id
        ],
        commandname='os.server-image-create')

    if r_output.is_error:
        return Error(r_output.unwrap_error())

    return Ok(ProvisioningProgress(
        state=ProvisioningState.PENDING,
        pool_data=OpenStackPoolData.unserialize(guest_request),
        delay_update=r_delay.unwrap()))
def can_acquire(self,
                logger: gluetool.log.ContextAdapter,
                session: sqlalchemy.orm.session.Session,
                guest_request: GuestRequest) -> Result[bool, Failure]:
    """
    Decide whether this pool can serve the given guest request.

    The parent's verdict is consulted first; then the request must map to both a usable
    image and a usable flavor.
    """

    r_answer = super(OpenStackDriver, self).can_acquire(logger, session, guest_request)

    if r_answer.is_error:
        return Error(r_answer.unwrap_error())

    # Parent already refused - propagate its negative answer as-is.
    if r_answer.unwrap() is False:
        return r_answer

    r_image = self.image_info_mapper.map_or_none(logger, guest_request)

    if r_image.is_error:
        return Error(r_image.unwrap_error())

    if r_image.unwrap() is None:
        return Ok(False)

    r_flavor = self._env_to_flavor(logger, session, guest_request)

    if r_flavor.is_error:
        return Error(r_flavor.unwrap_error())

    if r_flavor.unwrap() is None:
        return Ok(False)

    return Ok(True)
def test_sanity(log):
    # type: (Any) -> None
    """
    `wait` should retry a failing check and log each attempt until the check passes.
    """

    return_values = [
        Error('failed first time'),
        Error('failed second time'),
        Ok('finally passed')
    ]  # type: List[Result[str, str]]

    def _check():
        # type: () -> Result[str, str]
        return return_values.pop(0)

    wait('dummy check', _check, timeout=10, tick=2)

    assert len(log.records) == 9

    # todo: check decreasing remaining time
    messages = [record.message for record in log.records]

    assert re.match(
        r"waiting for condition 'dummy check', timeout \d seconds, check every 2 seconds",
        messages[0]) is not None

    # First attempt: fails, then sleeps.
    assert messages[1] == 'calling callback function'
    assert messages[2] == "check failed with 'failed first time', assuming failure"
    assert re.match(r'\d seconds left, sleeping for 2 seconds$', messages[3]) is not None

    # Second attempt: fails, then sleeps.
    assert messages[4] == 'calling callback function'
    assert messages[5] == "check failed with 'failed second time', assuming failure"
    assert re.match(r'\d seconds left, sleeping for 2 seconds$', messages[6]) is not None

    # Third attempt: succeeds.
    assert messages[7] == 'calling callback function'
    assert messages[8] == 'check passed, assuming success'
def _update_guest_log_console_blob(
    self,
    logger: gluetool.log.ContextAdapter,
    guest_request: GuestRequest,
    guest_log: GuestLog) -> Result[GuestLogUpdateProgress, Failure]:
    """
    Refresh the console-blob guest log by fetching the current console output.

    :returns: update progress, always ``IN_PROGRESS`` - the console log keeps growing,
        so it is re-fetched on every tick.
    """

    r_delay_update = KNOB_CONSOLE_BLOB_UPDATE_TICK.get_value(poolname=self.poolname)

    if r_delay_update.is_error:
        return Error(r_delay_update.unwrap_error())

    delay_update = r_delay_update.unwrap()

    r_output = self._do_fetch_console(guest_request, 'log', json_format=False)

    if r_output.is_error:
        return Error(r_output.unwrap_error())

    output = r_output.unwrap()

    # No console output available yet - keep polling.
    if output is None:
        return Ok(GuestLogUpdateProgress(
            state=GuestLogState.IN_PROGRESS,
            delay_update=delay_update))

    # TODO logs: well, this *is* overwriting what we already downloaded... Do something.
    return Ok(GuestLogUpdateProgress(
        state=GuestLogState.IN_PROGRESS,
        blob=cast(str, output),
        delay_update=delay_update))
def test_eq():
    """
    Result equality: same variant + equal payload are equal, everything else is not.
    """

    # Equal payloads of the same variant compare equal.
    assert Ok(1) == Ok(1)
    assert Error(1) == Error(1)
    assert not (Ok(1) != Ok(1))

    # Different variants, payloads, or payload types never compare equal.
    assert Ok(1) != Error(1)
    assert Ok(1) != Ok(2)
    assert Ok(1) != 'foo'
    assert Ok('0') != Ok(0)
def test_expect_error():
    """
    `expect_error` raises on Ok, and returns the payload on Error.
    """

    ok_result = Ok('foo')
    error_result = Error('foo')

    with pytest.raises(gluetool.GlueError):
        ok_result.expect_error('failure')

    assert error_result.expect_error('failure') == 'foo'
def test_unwrap_error():
    """
    `unwrap_error` raises on Ok, and returns the payload on Error.
    """

    ok_result = Ok('foo')
    error_result = Error('foo')

    with pytest.raises(gluetool.GlueError):
        ok_result.unwrap_error()

    assert error_result.unwrap_error() == 'foo'
def test_unwrap():
    """
    `unwrap` returns the payload on Ok, and raises on Error.
    """

    ok_result = Ok('foo')
    error_result = Error('foo')

    assert ok_result.unwrap() == 'foo'

    with pytest.raises(gluetool.GlueError):
        error_result.unwrap()
def validate_config(
    logger: gluetool.log.ContextAdapter,
    server_config: Dict[str, Any]) -> Result[List[str], Failure]:
    """
    Validate a server configuration data using a JSON schema.

    :return: either a list of validation errors, or a :py:class:`Failure` describing
        problem preventing the validation process.
    """

    # Accumulates all validation errors reported by `validate_data`.
    validation_errors: List[str] = []

    # First the overall server and common configuration.
    r_schema = load_validation_schema('common.yml')

    if r_schema.is_error:
        return Error(r_schema.unwrap_error())

    r_validation = validate_data(server_config, r_schema.unwrap())

    if r_validation.is_error:
        return Error(r_validation.unwrap_error())

    validation_errors += [f'server: {error}' for error in r_validation.unwrap()]

    # Then each pool, against its driver-specific schema.
    for pool in server_config.get('pools', []):
        failure_details = {
            'pool': pool.get('name'),
            'pool_driver': pool.get('driver')
        }

        r_schema = load_validation_schema(
            os.path.join('drivers', pool.get('driver', '') + '.yml'))

        if r_schema.is_error:
            r_schema.unwrap_error().details.update(failure_details)

            return Error(r_schema.unwrap_error())

        r_validation = validate_data(pool.get('parameters'), r_schema.unwrap())

        if r_validation.is_error:
            r_validation.unwrap_error().details.update(failure_details)

            return r_validation

        validation_errors += [
            f'pool "{pool.get("name")}": {error}'
            for error in r_validation.unwrap()
        ]

    return Ok(validation_errors)
def map_compose_to_imagename_by_pattern_map(
    logger: gluetool.log.ContextAdapter,
    pool: PoolDriver,
    compose_id: str,
    mapping_filename: Optional[str] = None,
    mapping_filepath: Optional[str] = None
) -> Result[Optional[str], Failure]:
    """
    Using a given pattern mapping file, try to map a compose to its corresponding image name.

    Pattern mapping files are described `here
    <https://gluetool.readthedocs.io/en/latest/gluetool.utils.html#gluetool.utils.PatternMap>`_.

    :param compose_id: compose ID to translate.
    :param mapping_filename: if set, pattern mapping file of this name is searched in
        Artemis' configuration directory.
    :param mapping_filepath: if set, this pattern mapping file is searched.
    :returns: either a image name, or :py:class:`tft.artemis.Failure` if the mapping was
        unsuccessfull.
    """

    # Resolve the mapping file: an explicit path wins over a configuration-relative filename.
    if not mapping_filepath:
        if not mapping_filename:
            return Error(
                Failure('no compose/image mapping file specified', compose=compose_id))

        mapping_filepath = os.path.join(KNOB_CONFIG_DIRPATH.value, mapping_filename)

    logger.debug(f'using pattern map {mapping_filepath}')

    r_cache_enabled = KNOB_CACHE_PATTERN_MAPS.get_value(poolname=pool.poolname)

    if r_cache_enabled.is_error:
        return Error(r_cache_enabled.unwrap_error())

    r_pattern_map = get_pattern_map(logger, mapping_filepath, use_cache=r_cache_enabled.unwrap())

    if r_pattern_map.is_error:
        return Error(r_pattern_map.unwrap_error().update(compose=compose_id))

    # An unmatched compose is not an error - report "no mapping" with Ok(None).
    try:
        imagename = r_pattern_map.unwrap().match(compose_id)

    except gluetool.glue.GlueError:
        return Ok(None)

    if isinstance(imagename, list):
        return Ok(imagename[0])

    return Ok(imagename)
def fetch_pool_flavor_info(self) -> Result[List[Flavor], Failure]:
    """
    Fetch flavors available in this OpenStack pool, optionally filtered by name.

    :returns: list of :py:class:`Flavor` instances, or a failure when the CLI call fails
        or its output is malformed.
    """

    # Flavors are described by OpenStack CLI with the following structure:
    #
    # {
    #   "Name": str, "RAM": int, "Ephemeral": int, "VCPUs": int,
    #   "Is Public": bool, "Disk": int, "ID": str
    # }

    r_flavors = self._run_os(['flavor', 'list'], commandname='os.flavor-list')

    if r_flavors.is_error:
        return Error(r_flavors.unwrap_error())

    if self.pool_config.get('flavor-regex'):
        flavor_name_pattern: Optional[Pattern[str]] = re.compile(self.pool_config['flavor-regex'])

    else:
        flavor_name_pattern = None

    flavors: List[Flavor] = []

    try:
        for raw_flavor in cast(List[Dict[str, str]], r_flavors.unwrap()):
            # When a name filter is configured, skip flavors that do not match it.
            if flavor_name_pattern is not None and not flavor_name_pattern.match(raw_flavor['Name']):
                continue

            flavors.append(Flavor(
                name=raw_flavor['Name'],
                id=raw_flavor['ID'],
                cpu=FlavorCpu(cores=int(raw_flavor['VCPUs'])),
                # memory is reported in MiB
                memory=UNITS.Quantity(int(raw_flavor['RAM']), UNITS.mebibytes),
                disk=FlavorDisks([
                    # diskspace is reported in GiB
                    FlavorDisk(size=UNITS.Quantity(int(raw_flavor['Disk']), UNITS.gibibytes))
                ]),
                virtualization=FlavorVirtualization(is_virtualized=True)))

    except KeyError as exc:
        return Error(
            Failure.from_exc('malformed flavor description', exc, flavor_info=r_flavors.unwrap()))

    return Ok(flavors)
def _submit_job(
    self,
    logger: gluetool.log.ContextAdapter,
    job: bs4.BeautifulSoup
) -> Result[str, Failure]:
    """
    Submit a Beaker job.

    :param gluetool.log.ContextAdapter logger: parent logger whose methods will be used for logging.
    :param xml job: A job to submit.
    :rtype: result.Result[str, Failure]
    :returns: :py:class:`result.Result` with job id, or specification of error.
    """

    log_xml(self.logger.debug, 'job to submit', job)

    # The job XML is written to a temporary file, which is then handed to `bkr job-submit`.
    with create_tempfile(
        file_contents=job.prettify(),
        prefix='beaker-job-',
        suffix='.xml'
    ) as job_filepath:
        # Temporary file has limited permissions, but we'd like to make the file inspectable.
        os.chmod(job_filepath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)

        r_job_submit = self._run_bkr(logger, ['job-submit', job_filepath], commandname='bkr.job-submit')

    if r_job_submit.is_error:
        return Error(r_job_submit.unwrap_error())

    bkr_output = r_job_submit.unwrap()

    # Parse job id from output. The ID is the text between the first and the last single
    # quote of the CLI's stdout, e.g.:
    try:
        # Submitted: ['J:1806666']
        first_job_index = bkr_output.stdout.index('\'') + 1
        last_job_index = len(bkr_output.stdout) - bkr_output.stdout[::-1].index('\'') - 1

        # J:1806666
        job_id = bkr_output.stdout[first_job_index:last_job_index]

    except Exception as exc:
        return Error(Failure.from_exc(
            'cannot convert job-submit output to job ID',
            exc,
            command_output=bkr_output.process_output
        ))

    logger.info(f'Job submitted: {job_id}')

    return Ok(job_id)
def create_beaker_filter(
    environment: Environment,
    pool: 'BeakerDriver',
    avoid_groups: List[str],
    avoid_hostnames: List[str]
) -> Result[Optional[bs4.BeautifulSoup], Failure]:
    """
    From given inputs, create a Beaker filter.

    :param environment: environment as a source of constraints.
    :param avoid_groups: list of Beaker groups to filter out when provisioning.
    :param avoid_hostnames: list of Beaker hostnames to filter out when provisioning.
    :returns: a Beaker filter taking all given inputs into account.
    """

    partial_filters: List[bs4.BeautifulSoup] = []

    # Collect a partial filter for each input that applies.
    if environment.has_hw_constraints:
        r_partial = environment_to_beaker_filter(environment, pool)

        if r_partial.is_error:
            return Error(r_partial.unwrap_error())

        partial_filters.append(r_partial.unwrap())

    if avoid_groups:
        r_partial = groups_to_beaker_filter(avoid_groups)

        if r_partial.is_error:
            return Error(r_partial.unwrap_error())

        partial_filters.append(r_partial.unwrap())

    if avoid_hostnames:
        r_partial = hostnames_to_beaker_filter(avoid_hostnames)

        if r_partial.is_error:
            return Error(r_partial.unwrap_error())

        partial_filters.append(r_partial.unwrap())

    # No constraints at all: there is simply no filter.
    if not partial_filters:
        return Ok(None)

    r_merged = merge_beaker_filters(partial_filters)

    if r_merged.is_error:
        return Error(r_merged.unwrap_error())

    return _prune_beaker_filter(r_merged.unwrap())
def _env_to_flavor(self,
                   logger: gluetool.log.ContextAdapter,
                   session: sqlalchemy.orm.session.Session,
                   guest_request: GuestRequest) -> Result[Flavor, Failure]:
    """
    Map the guest request's environment to a flavor, treating "no match" as a failure.
    """

    r_flavor = self._env_to_flavor_or_none(logger, session, guest_request)

    if r_flavor.is_error:
        return Error(r_flavor.unwrap_error())

    flavor = r_flavor.unwrap()

    # Unlike `_env_to_flavor_or_none`, the absence of a flavor is an error here.
    if flavor is None:
        return Error(Failure('no suitable flavor'))

    return Ok(flavor)
def get_value(  # type: ignore[override]  # match parent
    self,
    *,
    session: Session,
    poolname: Optional[str] = None,
    pool: Optional['PoolDriver'] = None,
    **kwargs: Any
) -> Result[Optional[T], 'Failure']:
    """
    Fetch the knob value from the DB, preferring a pool-specific entry over the global one.
    """

    if poolname is None:
        if pool is None:
            return Error(Failure('either pool or poolname must be specified'))

        poolname = pool.poolname

    r_value = self._fetch_from_db(session, f'{self.knob.knobname}:{poolname}')

    # Propagate errors and any pool-specific hit as-is.
    if r_value.is_error or r_value.unwrap() is not None:
        return r_value

    # No pool-specific value - fall back to the pool-less entry.
    return self._fetch_from_db(session, self.knob.knobname)
def get_value(
    self,
    *,
    poolname: Optional[str] = None,
    pool: Optional['PoolDriver'] = None,
    **kwargs: Any
) -> Result[Optional[T], 'Failure']:
    """
    Fetch the knob value from the environment, preferring a pool-specific variable.
    """

    if poolname is None:
        if pool is None:
            return Error(Failure('either pool or poolname must be specified'))

        poolname = pool.poolname

    # Pool-specific variable name: dashes are not valid in env var names.
    r_value = self._fetch_from_env(f'{self.envvar}_{poolname.replace("-", "_")}')

    # Propagate errors and any pool-specific hit as-is.
    if r_value.is_error or r_value.unwrap() is not None:
        return r_value

    # No pool-specific value - fall back to the plain variable.
    return self._fetch_from_env(self.envvar)
def test_collect_pool_capabilities_error(
    mock_inputs: MockInputs,
    monkeypatch: _pytest.monkeypatch.MonkeyPatch,
    mockpatch: MockPatcher
) -> None:
    """
    A single failing pool turns the whole capability collection into that pool's failure.
    """

    mock_logger, mock_session, mock_pools, mock_guest_request = mock_inputs

    # Middle pool fails, its neighbours succeed.
    capability_results = [
        Ok(MagicMock(name=f'{mock_pools[0].poolname}.capabilities<mock>')),
        Error(MagicMock(name='failure<mock>')),
        Ok(MagicMock(name=f'{mock_pools[2].poolname}.capabilities<mock>'))
    ]

    for pool, capability_result in zip(mock_pools, capability_results):
        mockpatch(pool, 'capabilities').return_value = capability_result

    r = tft.artemis.routing_policies.collect_pool_capabilities(mock_pools)

    assert r.is_error

    failure = r.unwrap_error()

    assert isinstance(failure, tft.artemis.Failure)
    assert failure.caused_by == cast(MagicMock, mock_pools[1]).capabilities.return_value.unwrap_error()
def _run_bkr(
    self,
    logger: gluetool.log.ContextAdapter,
    options: List[str],
    commandname: Optional[str] = None
) -> Result[CLIOutput, Failure]:
    """
    Run bkr command with additional options

    :param gluetool.log.ContextAdapter logger: logger to use for logging.
    :param List(str) options: options for the command
    :returns: either a valid result, :py:class:`CLIOutput` instance, or an error with
        a :py:class:`Failure` describing the problem.
    """

    r_run = run_cli_tool(
        logger,
        self._bkr_command + options,
        json_output=False,
        poolname=self.poolname,
        commandname=commandname
    )

    if r_run.is_error:
        return Error(r_run.unwrap_error())

    return Ok(r_run.unwrap())
def test_run_routing_policies_error(
    mock_inputs: MockInputs,
    mock_policies: List[tft.artemis.routing_policies.PolicyType]
) -> None:
    """
    A failing policy aborts routing with a wrapping failure, and later policies never run.
    """

    mock_logger, mock_session, mock_pools, mock_guest_request = mock_inputs

    mock_failure = MagicMock(name='failure<mock>')

    # The very first policy fails.
    cast(MagicMock, mock_policies[0]).return_value = Error(mock_failure)

    r_ruling = tft.artemis.routing_policies.run_routing_policies(
        mock_logger, mock_session, mock_guest_request, mock_pools, mock_policies)

    assert r_ruling.is_error

    failure = r_ruling.unwrap_error()

    assert isinstance(failure, tft.artemis.Failure)
    assert failure.message == 'failed to route guest request'
    assert failure.caused_by is mock_failure

    # The failing policy was consulted exactly once, the next one not at all.
    cast(MagicMock, mock_policies[0]).assert_called_once_with(
        mock_logger, mock_session, mock_pools, mock_guest_request)
    cast(MagicMock, mock_policies[1]).assert_not_called()
def release_guest(
    self,
    logger: gluetool.log.ContextAdapter,
    guest_request: GuestRequest
) -> Result[bool, Failure]:
    """
    Release guest and its resources back to the pool.

    :param Guest guest: a guest to be destroyed.
    :rtype: result.Result[bool, str]
    """

    # No Beaker job was ever created for this guest - nothing to cancel.
    if BeakerPoolData.is_empty(guest_request):
        return Ok(True)

    job_id = BeakerPoolData.unserialize(guest_request).job_id

    r_job_cancel = self._dispatch_resource_cleanup(
        logger,
        job_id=job_id,
        guest_request=guest_request
    )

    if r_job_cancel.is_error:
        return Error(r_job_cancel.unwrap_error())

    return Ok(True)
def test_evaluate_ruling_cancel_fail_state_change(
    workspace: Workspace,
    monkeypatch: _pytest.monkeypatch.MonkeyPatch) -> None:
    """
    When a cancelling ruling's state change fails, the failure is kept as the workspace
    result and no new pool is selected.
    """

    patch(monkeypatch, workspace, 'handle_success')

    mock_error: tft.artemis.tasks.DoerReturnType = Error(tft.artemis.Failure('mock error'))

    # Simulate `update_guest_state` failing by stashing an error into the workspace.
    def _fail_state_change(*args: Any, **kwargs: Any) -> None:
        workspace.result = mock_error

    patch(monkeypatch, workspace, 'update_guest_state').side_effect = _fail_state_change

    workspace.ruling = tft.artemis.routing_policies.PolicyRuling(
        cancel=True,
        allowed_pools=[MagicMock(name='pool1'), MagicMock(name='pool2')])

    assert workspace.evaluate_ruling() is workspace
    assert workspace.result is mock_error
    assert workspace.new_pool is None

    cast(MagicMock, workspace.update_guest_state).assert_called_once_with(
        tft.artemis.guest.GuestState.ERROR,
        current_state=tft.artemis.guest.GuestState.ROUTING)
    cast(MagicMock, workspace.handle_success).assert_called_once_with('routing-cancelled')