Example #1
    def release_guest(
        self,
        logger: gluetool.log.ContextAdapter,
        guest_request: GuestRequest
    ) -> Result[bool, Failure]:
        """
        Release guest and its resources back to the pool.

        :param Guest guest: a guest to be destroyed.
        :rtype: result.Result[bool, str]
        """

        if BeakerPoolData.is_empty(guest_request):
            return Ok(True)

        r_job_cancel = self._dispatch_resource_cleanup(
            logger,
            job_id=BeakerPoolData.unserialize(guest_request).job_id,
            guest_request=guest_request
        )

        if r_job_cancel.is_error:
            return Error(r_job_cancel.unwrap_error())

        return Ok(True)
Example #2
def test_driver_load_or_none(
    logger: gluetool.log.ContextAdapter,
    session: sqlalchemy.orm.session.Session,
    mockpatch: MockPatcher
) -> None:
    mock_record = MagicMock(
        name='Pool()<M>',
        driver='dummy-driver',
        parameters={}
    )

    mock_pool = MagicMock(name='pool<M>')

    mockpatch(tft.artemis.drivers.SafeQuery, 'one_or_none').return_value = Ok(mock_record)  # type: ignore[attr-defined]
    mockpatch(tft.artemis.drivers.PoolDriver, '_instantiate').return_value = Ok(mock_pool)

    r_pool = tft.artemis.drivers.PoolDriver.load_or_none(logger, session, 'dummy-pool')

    assert r_pool.is_ok

    pool = r_pool.unwrap()

    cast(MagicMock, tft.artemis.drivers.PoolDriver._instantiate).assert_called_once_with(
        logger,
        'dummy-driver',
        'dummy-pool',
        mock_record.parameters
    )

    assert pool is mock_pool
Example #3
def policy_supports_spot_instances(
        logger: gluetool.log.ContextAdapter,
        session: sqlalchemy.orm.session.Session, pools: List[PoolDriver],
        guest_request: GuestRequest) -> PolicyReturnType:
    """
    If guest request requires spot instance, disallow all pools that lack this capability.
    """

    # If the request does not insist on using a spot or non-spot instance, we can easily move forward and use
    # any pool we've been given.
    if guest_request.environment.spot_instance is None:
        return Ok(PolicyRuling(allowed_pools=pools))

    r_capabilities = collect_pool_capabilities(pools)

    if r_capabilities.is_error:
        return Error(r_capabilities.unwrap_error())

    pool_capabilities = r_capabilities.unwrap()

    # Pick only pools whose spot instance support matches the request - a pool cannot support both kinds at the same
    # time.
    return Ok(
        PolicyRuling(allowed_pools=[
            pool for pool, capabilities in pool_capabilities
            if capabilities.supports_spot_instances is
            guest_request.environment.spot_instance
        ]))
Example #4
def policy_timeout_reached(logger: gluetool.log.ContextAdapter,
                           session: sqlalchemy.orm.session.Session,
                           pools: List[PoolDriver],
                           guest_request: GuestRequest) -> PolicyReturnType:
    """
    Cancel the guest request if it reached a certain age. The threshold is controlled by
    :py:data:`KNOB_ROUTE_REQUEST_MAX_TIME`.
    """

    r_events = guest_request.fetch_events(session, eventname='created')

    if r_events.is_error:
        return Error(r_events.unwrap_error())

    events = r_events.unwrap()

    if not events:
        return Ok(PolicyRuling(allowed_pools=pools))

    r_time = KNOB_ROUTE_REQUEST_MAX_TIME.get_value(session=session)

    if r_time.is_error:
        return Error(r_time.unwrap_error())

    validity = events[0].updated + datetime.timedelta(seconds=r_time.unwrap())

    logger.info(f'event created {events[0].updated}, valid until {validity}')

    if datetime.datetime.utcnow() > validity:
        return Ok(PolicyRuling(cancel=True))

    return Ok(PolicyRuling(allowed_pools=pools))
Example #5
def policy_prefer_spot_instances(
        logger: gluetool.log.ContextAdapter,
        session: sqlalchemy.orm.session.Session, pools: List[PoolDriver],
        guest_request: GuestRequest) -> PolicyReturnType:
    """
    Prefer pools capable of using spot instances to satisfy the request. If there are no such pools, all given pools
    are returned - *prefer*, not *allow only*.
    """

    # If the request does insist on using a spot or non-spot instance, we should not mess with it by
    # possibly removing the group of pools it requires. For such environments, do nothing and let other
    # policies apply their magic.
    if guest_request.environment.spot_instance is not None:
        return Ok(PolicyRuling(allowed_pools=pools))

    r_capabilities = collect_pool_capabilities(pools)

    if r_capabilities.is_error:
        return Error(r_capabilities.unwrap_error())

    preferred_pools = [
        pool for pool, capabilities in r_capabilities.unwrap()
        if capabilities.supports_spot_instances is True
    ]

    if not preferred_pools:
        return Ok(PolicyRuling(allowed_pools=pools))

    return Ok(PolicyRuling(allowed_pools=preferred_pools))
Example #6
    def _update_guest_log_console_url(
            self, logger: gluetool.log.ContextAdapter,
            guest_request: GuestRequest,
            guest_log: GuestLog) -> Result[GuestLogUpdateProgress, Failure]:
        r_delay_update = KNOB_CONSOLE_BLOB_UPDATE_TICK.get_value(
            poolname=self.poolname)

        if r_delay_update.is_error:
            return Error(r_delay_update.unwrap_error())

        delay_update = r_delay_update.unwrap()

        r_output = self._do_fetch_console(guest_request, 'url')

        if r_output.is_error:
            return Error(r_output.unwrap_error())

        output = r_output.unwrap()

        if output is None:
            return Ok(
                GuestLogUpdateProgress(state=GuestLogState.IN_PROGRESS,
                                       delay_update=delay_update))

        return Ok(
            GuestLogUpdateProgress(
                state=GuestLogState.COMPLETE,
                url=cast(Dict[str, str], output)['url'],
                expires=datetime.datetime.utcnow() +
                datetime.timedelta(seconds=KNOB_CONSOLE_URL_EXPIRES.value)))
Example #7
def test_collect_pool_capabilities_error(
    mock_inputs: MockInputs,
    monkeypatch: _pytest.monkeypatch.MonkeyPatch,
    mockpatch: MockPatcher
) -> None:
    mock_logger, mock_session, mock_pools, mock_guest_request = mock_inputs

    mockpatch(
        mock_pools[0],
        'capabilities'
    ).return_value = Ok(MagicMock(name=f'{mock_pools[0].poolname}.capabilities<mock>'))
    mockpatch(
        mock_pools[1],
        'capabilities'
    ).return_value = Error(MagicMock(name='failure<mock>'))
    mockpatch(
        mock_pools[2],
        'capabilities'
    ).return_value = Ok(MagicMock(name=f'{mock_pools[2].poolname}.capabilities<mock>'))

    r = tft.artemis.routing_policies.collect_pool_capabilities(mock_pools)

    assert r.is_error

    failure = r.unwrap_error()

    assert isinstance(failure, tft.artemis.Failure)
    assert failure.caused_by == cast(MagicMock, mock_pools[1]).capabilities.return_value.unwrap_error()
Example #8
    def release_guest(self, logger: gluetool.log.ContextAdapter,
                      guest_request: GuestRequest) -> Result[bool, Failure]:
        """
        Release guest and its resources back to the pool.

        :param Guest guest: a guest to be destroyed.
        :rtype: result.Result[bool, str]
        """

        if OpenStackPoolData.is_empty(guest_request):
            return Ok(True)

        if guest_request.poolname != self.poolname:
            return Error(Failure('guest is not owned by this pool'))

        r_cleanup = self._dispatch_resource_cleanup(
            logger,
            instance_id=OpenStackPoolData.unserialize(
                guest_request).instance_id,
            guest_request=guest_request)

        if r_cleanup.is_error:
            return Error(r_cleanup.unwrap_error())

        return Ok(True)
Example #9
    def can_acquire(self, logger: gluetool.log.ContextAdapter,
                    session: sqlalchemy.orm.session.Session,
                    guest_request: GuestRequest) -> Result[bool, Failure]:
        r_answer = super().can_acquire(logger, session, guest_request)

        if r_answer.is_error:
            return Error(r_answer.unwrap_error())

        if r_answer.unwrap() is False:
            return r_answer

        r_image = self.image_info_mapper.map_or_none(logger, guest_request)
        if r_image.is_error:
            return Error(r_image.unwrap_error())

        if r_image.unwrap() is None:
            return Ok(False)

        r_flavor = self._env_to_flavor(logger, session, guest_request)

        if r_flavor.is_error:
            return Error(r_flavor.unwrap_error())

        if r_flavor.unwrap() is None:
            return Ok(False)

        return Ok(True)
Example #10
    def _update_guest_log_console_blob(
            self, logger: gluetool.log.ContextAdapter,
            guest_request: GuestRequest,
            guest_log: GuestLog) -> Result[GuestLogUpdateProgress, Failure]:
        r_delay_update = KNOB_CONSOLE_BLOB_UPDATE_TICK.get_value(
            poolname=self.poolname)

        if r_delay_update.is_error:
            return Error(r_delay_update.unwrap_error())

        delay_update = r_delay_update.unwrap()

        r_output = self._do_fetch_console(guest_request,
                                          'log',
                                          json_format=False)

        if r_output.is_error:
            return Error(r_output.unwrap_error())

        output = r_output.unwrap()

        if output is None:
            return Ok(
                GuestLogUpdateProgress(state=GuestLogState.IN_PROGRESS,
                                       delay_update=delay_update))

        return Ok(
            GuestLogUpdateProgress(
                state=GuestLogState.IN_PROGRESS,
                # TODO logs: well, this *is* overwriting what we already downloaded... Do something.
                blob=cast(str, output),
                delay_update=delay_update))
Example #11
def policy_least_crowded(logger: gluetool.log.ContextAdapter,
                         session: sqlalchemy.orm.session.Session,
                         pools: List[PoolDriver],
                         guest_request: GuestRequest) -> PolicyReturnType:
    """
    Pick the least crowded pools, i.e. pools with the lowest absolute usage.
    """

    if len(pools) <= 1:
        return Ok(PolicyRuling(allowed_pools=pools))

    r_pool_metrics = collect_pool_metrics(pools)

    if r_pool_metrics.is_error:
        return Error(r_pool_metrics.unwrap_error())

    pool_metrics = r_pool_metrics.unwrap()

    log_dict(logger.debug, 'pool metrics', pool_metrics)

    min_usage = min(
        [metrics.current_guest_request_count for _, metrics in pool_metrics])

    return Ok(
        PolicyRuling(allowed_pools=[
            pool for pool, metrics in pool_metrics
            if metrics.current_guest_request_count == min_usage
        ]))
Example #12
def test_unwrap():
    o = Ok('foo')
    n = Error('foo')

    assert o.unwrap() == 'foo'

    with pytest.raises(gluetool.GlueError):
        n.unwrap()
Example #13
def test_expect_error():
    o = Ok('foo')
    n = Error('foo')

    with pytest.raises(gluetool.GlueError):
        o.expect_error('failure')

    assert n.expect_error('failure') == 'foo'
Example #14
def test_eq():
    assert Ok(1) == Ok(1)
    assert Error(1) == Error(1)
    assert Ok(1) != Error(1)
    assert Ok(1) != Ok(2)
    assert not (Ok(1) != Ok(1))
    assert Ok(1) != 'foo'
    assert Ok('0') != Ok(0)
Example #15
def test_unwrap_error():
    o = Ok('foo')
    n = Error('foo')

    with pytest.raises(gluetool.GlueError):
        o.unwrap_error()

    assert n.unwrap_error() == 'foo'
Example #16
    def _fetch_from_env(self, envvar: str) -> Result[Optional[T], 'Failure']:
        if envvar not in os.environ:
            return Ok(None)

        assert self.knob.cast_from_str is not None

        return Ok(
            self.knob.cast_from_str(os.environ[envvar])
        )
Example #17
def map_compose_to_imagename_by_pattern_map(
        logger: gluetool.log.ContextAdapter,
        pool: PoolDriver,
        compose_id: str,
        mapping_filename: Optional[str] = None,
        mapping_filepath: Optional[str] = None
) -> Result[Optional[str], Failure]:
    """
    Using a given pattern mapping file, try to map a compose to its corresponding image name.

    Pattern mapping files are described
    `here <https://gluetool.readthedocs.io/en/latest/gluetool.utils.html#gluetool.utils.PatternMap>`_.

    :param logger: logger to use for logging.
    :param pool: pool whose configuration controls pattern map caching.
    :param compose_id: compose ID to translate.
    :param mapping_filename: if set, a pattern mapping file of this name is looked up in Artemis' configuration directory.
    :param mapping_filepath: if set, this pattern mapping file is used directly.
    :returns: either an image name, or :py:class:`tft.artemis.Failure` if the mapping was unsuccessful.
    """

    if mapping_filepath:
        pass

    elif mapping_filename:
        mapping_filepath = os.path.join(KNOB_CONFIG_DIRPATH.value,
                                        mapping_filename)

    else:
        return Error(
            Failure('no compose/image mapping file specified',
                    compose=compose_id))

    logger.debug(f'using pattern map {mapping_filepath}')

    r_cache_enabled = KNOB_CACHE_PATTERN_MAPS.get_value(poolname=pool.poolname)

    if r_cache_enabled.is_error:
        return Error(r_cache_enabled.unwrap_error())

    r_pattern_map = get_pattern_map(logger,
                                    mapping_filepath,
                                    use_cache=r_cache_enabled.unwrap())

    if r_pattern_map.is_error:
        return Error(r_pattern_map.unwrap_error().update(compose=compose_id))

    pattern_map = r_pattern_map.unwrap()

    try:
        imagename = pattern_map.match(compose_id)

    except gluetool.glue.GlueError:
        return Ok(None)

    return Ok(imagename[0] if isinstance(imagename, list) else imagename)
Example #18
    def policy(logger: gluetool.log.ContextAdapter,
               session: sqlalchemy.orm.session.Session,
               pools: List[PoolDriver],
               guest_request: GuestRequest) -> PolicyReturnType:
        preferred_pools: List[PoolDriver] = [
            pool for pool in pools if isinstance(pool, preferred_drivers)
        ]

        if not preferred_pools:
            return Ok(PolicyRuling(allowed_pools=pools))

        return Ok(PolicyRuling(allowed_pools=preferred_pools))
Example #19
def do_test_policy_supports_snapshots(
    mock_inputs: MockInputs,
    require_snapshots: bool,
    provide_snapshots: bool,
    monkeypatch: _pytest.monkeypatch.MonkeyPatch
) -> None:
    mock_logger, mock_session, mock_pools, mock_guest_request = mock_inputs

    for mock_pool in mock_pools:
        monkeypatch.setattr(
            mock_pool,
            'capabilities',
            lambda: Ok(tft.artemis.drivers.PoolCapabilities(supports_snapshots=False))
        )

    if provide_snapshots:
        monkeypatch.setattr(
            mock_pools[0],
            'capabilities',
            lambda: Ok(tft.artemis.drivers.PoolCapabilities(supports_snapshots=True))
        )

    mock_guest_request.environment = tft.artemis.environment.Environment(
        hw=tft.artemis.environment.HWRequirements(arch='x86_64'),
        os=tft.artemis.environment.OsRequirements(compose='dummy-compose'),
        snapshots=require_snapshots
    )

    r_ruling = tft.artemis.routing_policies.policy_supports_snapshots(
        mock_logger,
        mock_session,
        mock_pools,
        mock_guest_request
    )

    assert r_ruling.is_ok

    ruling = r_ruling.unwrap()

    assert isinstance(ruling, tft.artemis.routing_policies.PolicyRuling)
    assert ruling.cancel is False

    if require_snapshots and provide_snapshots:
        assert ruling.allowed_pools == [mock_pools[0]]

    elif require_snapshots and not provide_snapshots:
        assert ruling.allowed_pools == []

    elif not require_snapshots:
        assert ruling.allowed_pools == mock_pools

    else:
        assert False, 'unreachable'
Example #20
def get_pattern_map(
        logger: gluetool.log.ContextAdapter,
        filepath: str,
        use_cache: bool = True) -> Result[gluetool.utils.PatternMap, Failure]:
    if not use_cache:
        try:
            return Ok(
                gluetool.utils.PatternMap(filepath,
                                          allow_variables=True,
                                          logger=logger))

        except Exception as exc:
            return Error(
                Failure.from_exc('cannot load mapping file',
                                 exc,
                                 filepath=filepath))

    def _refresh_cache() -> Result[gluetool.utils.PatternMap, Failure]:
        try:
            stat = os.stat(filepath)
            pattern_map = gluetool.utils.PatternMap(filepath,
                                                    allow_variables=True,
                                                    logger=logger)

        except Exception as exc:
            return Error(
                Failure.from_exc('cannot load mapping file',
                                 exc,
                                 filepath=filepath))

        logger.info(f'pattern-map-cache: {filepath} - refreshing')

        _PATTERN_MAP_CACHE[filepath] = (stat.st_mtime, pattern_map)

        return Ok(pattern_map)

    with _PATTERN_MAP_CACHE_LOCK:
        if filepath not in _PATTERN_MAP_CACHE:
            logger.debug(f'pattern-map-cache: {filepath} - not in cache')

            return _refresh_cache()

        stamp, pattern_map = _PATTERN_MAP_CACHE[filepath]
        stat = os.stat(filepath)

        if stat.st_mtime > stamp:
            logger.warning(f'pattern-map-cache: {filepath} - outdated')

            return _refresh_cache()

        logger.debug(f'pattern-map-cache: {filepath} - using cached')

        return Ok(pattern_map)
Example #21
def fixture_mock_pool(
    mockpatch: MockPatcher
) -> Tuple[PoolDriver, Callable[
    [gluetool.log.ContextAdapter, sqlalchemy.orm.session.Session, str], Result[
        tft.artemis.drivers.PoolDriver, Failure]]]:
    mock_pool = MagicMock(name='PoolDriver (mock)')

    mock_pool.release_pool_resources = MagicMock(
        name='PoolDriver.release_pool_resources (mock)', return_value=Ok(None))

    mock_get_pool = mockpatch(tft.artemis.drivers.PoolDriver, 'load')
    mock_get_pool.return_value = Ok(mock_pool)

    return mock_pool, mock_get_pool
Example #22
    def _run_os(
        self,
        options: List[str],
        json_format: bool = True,
        commandname: Optional[str] = None
    ) -> Result[Union[JSONType, str], Failure]:
        """
        Run os command with additional options and return output in json format

        :param List(str) options: options for the command
        :param bool json_format: returns json format if true
        :param commandname: if specified, driver will increase "CLI calls" metrics for this ``commandname``.
        :rtype: result.Result[str, Failure]
        :returns: :py:class:`result.Result` with output, or specification of error.
        """

        # Copy the command base; we don't want to spoil it for others.
        os_base = self._os_cmd_base[:]

        # The -f (format) option must be placed after the command.
        if json_format:
            # Extend a copy rather than the caller's list of options.
            options = options + ['-f', 'json']

        r_run = run_cli_tool(self.logger,
                             os_base + options,
                             json_output=json_format,
                             command_scrubber=lambda cmd:
                             (['openstack'] + options),
                             poolname=self.poolname,
                             commandname=commandname)

        if r_run.is_error:
            failure = r_run.unwrap_error()

            # Detect "instance does not exist" - this error is clearly irrecoverable. No matter how often we would
            # run this method, we would never evenr made it remove instance that doesn't exist.
            if test_cli_error(failure, MISSING_INSTANCE_ERROR_PATTERN):
                failure.recoverable = False

                PoolMetrics.inc_error(self.poolname, 'missing-instance')

            return Error(failure)

        cli_output = r_run.unwrap()

        if json_format:
            return Ok(cli_output.json)

        return Ok(cli_output.stdout)
Example #23
def policy_use_only_when_addressed(
        logger: gluetool.log.ContextAdapter,
        session: sqlalchemy.orm.session.Session, pools: List[PoolDriver],
        guest_request: GuestRequest) -> PolicyReturnType:
    """
    Disallow pools that are marked as to be used only when requested by name.
    """

    if guest_request.environment.pool is not None:
        return Ok(PolicyRuling(allowed_pools=pools))

    return Ok(
        PolicyRuling(allowed_pools=[
            pool for pool in pools if pool.use_only_when_addressed is False
        ]))
Example #24
    def _parse_job_status(
        self,
        logger: gluetool.log.ContextAdapter,
        job_results: bs4.BeautifulSoup
    ) -> Result[Tuple[str, str], Failure]:
        """
        Parse job results and return the job's result and status.

        :param bs4.BeautifulSoup job_results: job results in XML format.
        :rtype: result.Result[Tuple[str, str], Failure]
        :returns: a tuple with two items, job result and status, or specification of error.
        """

        if not job_results.find('job') or len(job_results.find_all('job')) != 1:
            return Error(Failure(
                'job results XML has unknown structure',
                job_results=job_results.prettify()
            ))

        job = job_results.find('job')

        # Use ``get`` so a missing attribute does not raise KeyError.
        if not job.get('result'):
            return Error(Failure(
                'job results XML does not contain result attribute',
                job_results=job_results.prettify()
            ))

        if not job.get('status'):
            return Error(Failure(
                'job results XML does not contain status attribute',
                job_results=job_results.prettify()
            ))

        return Ok((job['result'].lower(), job['status'].lower()))
Example #25
    def _get_job_results(
        self,
        logger: gluetool.log.ContextAdapter,
        job_id: str
    ) -> Result[bs4.BeautifulSoup, Failure]:
        """
        Run 'bkr job-results' comand and return job results.

        :param str job_id: Job id that will be rescheduled.
        :rtype: result.Result[str, Failure]
        :returns: :py:class:`result.Result` with job results, or specification of error.
        """

        r_results = self._run_bkr(logger, ['job-results', job_id], commandname='bkr.job-results')

        if r_results.is_error:
            return Error(r_results.unwrap_error())

        bkr_output = r_results.unwrap()

        try:
            return Ok(bs4.BeautifulSoup(bkr_output.stdout, 'xml'))

        except Exception as exc:
            return Error(Failure.from_exc(
                'failed to parse job results XML',
                exc,
                command_output=bkr_output.process_output
            ))
Example #26
    def _run_bkr(
        self,
        logger: gluetool.log.ContextAdapter,
        options: List[str],
        commandname: Optional[str] = None
    ) -> Result[CLIOutput, Failure]:
        """
        Run bkr command with additional options

        :param gluetool.log.ContextAdapter logger: logger to use for logging.
        :param List(str) options: options for the command
        :returns: either a valid result, :py:class:`CLIOutput` instance, or an error with a :py:class:`Failure`
            describing the problem.
        """

        r_run = run_cli_tool(
            logger,
            self._bkr_command + options,
            json_output=False,
            poolname=self.poolname,
            commandname=commandname
        )

        if r_run.is_error:
            return Error(r_run.unwrap_error())

        return Ok(r_run.unwrap())
Example #27
    def refresh_avoid_groups_hostnames(self, logger: ContextAdapter) -> Result[None, Failure]:
        groups: List[AvoidGroupHostnames] = []

        r_avoid_groups = self.avoid_groups

        if r_avoid_groups.is_error:
            return Error(r_avoid_groups.unwrap_error())

        for groupname in r_avoid_groups.unwrap():
            r_list = self._fetch_avoid_group_hostnames(logger, groupname)

            if r_list.is_error:
                return Error(r_list.unwrap_error())

            groups.append(AvoidGroupHostnames(
                groupname=groupname,
                hostnames=r_list.unwrap()
            ))

        r_refresh = refresh_cached_set(
            CACHE.get(),
            self.avoid_groups_hostnames_cache_key,
            {
                h.groupname: h
                for h in groups
            }
        )

        if r_refresh.is_error:
            return Error(r_refresh.unwrap_error())

        return Ok(None)
Example #28
def test_boilerplate(mock_inputs: MockInputs) -> None:
    mock_return_value: tft.artemis.routing_policies.PolicyReturnType = Ok(MagicMock(name='policy_ruling<mock>'))

    mock_logger, mock_session, mock_pools, mock_guest_request = mock_inputs

    @tft.artemis.routing_policies.policy_boilerplate
    def policy_dummy_whatever(
        logger: gluetool.log.ContextAdapter,
        session: sqlalchemy.orm.session.Session,
        pools: List[PoolDriver],
        guest_request: tft.artemis.db.GuestRequest
    ) -> tft.artemis.routing_policies.PolicyReturnType:
        assert isinstance(logger, tft.artemis.routing_policies.PolicyLogger)
        assert logger._contexts == {'policy_name': (50, 'dummy-whatever')}

        assert session is mock_session
        assert pools is mock_pools
        assert guest_request is mock_guest_request

        return mock_return_value

    r_ruling = policy_dummy_whatever(mock_logger, mock_session, mock_pools, mock_guest_request)

    assert r_ruling is mock_return_value
    assert r_ruling.unwrap() is mock_return_value.unwrap()
Example #29
def test_collect_pool_capabilities(
    mock_inputs: MockInputs,
    mockpatch: MockPatcher
) -> None:
    mock_logger, mock_session, mock_pools, mock_guest_request = mock_inputs

    mock_capabilities = [
        MagicMock(name=f'{pool.poolname}.capabilities<mock>') for pool in mock_pools
    ]

    for pool, capabilities in zip(mock_pools, mock_capabilities):
        mockpatch(pool, 'capabilities').return_value = Ok(capabilities)

    r = tft.artemis.routing_policies.collect_pool_capabilities(mock_pools)

    assert r.is_ok

    collected = r.unwrap()

    for i in range(0, len(mock_pools)):
        expected_pool = mock_pools[i]
        expected_capabilities = mock_capabilities[i]

        actual_pool, actual_capabilities = collected[i]

        assert actual_pool is expected_pool
        assert actual_capabilities is expected_capabilities
Example #30
def test_sanity(log):
    # type: (Any) -> None

    return_values = [
        Error('failed first time'),
        Error('failed second time'),
        Ok('finally passed')
    ]  # type: List[Result[str, str]]

    def _check():
        # type: () -> Result[str, str]

        return return_values.pop(0)

    wait('dummy check', _check, timeout=10, tick=2)

    assert len(log.records) == 9

    # todo: check decreasing remaining time
    # pylint: disable=line-too-long
    assert re.match(
        r"waiting for condition 'dummy check', timeout \d seconds, check every 2 seconds",
        log.records[0].message) is not None  # Ignore PEP8Bear
    assert log.records[1].message == 'calling callback function'
    assert log.records[2].message == 'check failed with \'failed first time\', assuming failure'
    assert re.match(r'\d seconds left, sleeping for 2 seconds$',
                    log.records[3].message) is not None
    assert log.records[4].message == 'calling callback function'
    assert log.records[5].message == 'check failed with \'failed second time\', assuming failure'
    assert re.match(r'\d seconds left, sleeping for 2 seconds$',
                    log.records[6].message) is not None
    assert log.records[7].message == 'calling callback function'
    assert log.records[8].message == 'check passed, assuming success'