Example #1
def jobs(
        draw,
        ids=uuids(),
        statuses=sampled_from(JobInterface.JobStatus),
        parameters=dictionaries(text(), text()),
        results=dictionaries(text(), text()),
        dates_submitted=datetimes(),
        registration_schemas=dictionaries(text(), text()),
        result_schemas=dictionaries(text(), text())
) -> JobInterface:
    """

    :param draw: A function that can take a strategy and draw a datum from it
    :param ids: A hypothesis strategy (statisticians should read "random
        variable") that represents the set of all valid job IDs
    :param statuses: A hypothesis strategy that samples from the set of all
        allowed job statuses
    :param parameters: A hypothesis strategy that samples from all job
        parameters
    :param results: A hypothesis strategy that represents the possible results
    :param dates_submitted: A hypothesis strategy that represents the
        possible dates that can be submitted
    :param registration_schemas: The possible job registration schemas
    :param result_schemas: The possible job result schemas
    :return: A randomly-generated implementation of :class:`JobInterface`
    """
    return Job(
        draw(ids), draw(statuses), draw(parameters), draw(results),
        draw(dates_submitted),
        draw(registration_schemas),
        draw(result_schemas)
    )
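
Note: strategy functions like `jobs` that take a `draw` argument are meant to be wrapped with Hypothesis's `@st.composite` decorator, which this listing appears to strip. A minimal sketch of the pattern (the `points` strategy is a hypothetical illustration, not part of the example above):

from hypothesis import given, strategies as st

@st.composite
def points(draw, xs=st.integers(), ys=st.integers()):
    # draw() pulls one concrete value out of each sub-strategy
    return (draw(xs), draw(ys))

@given(points())
def test_point_is_a_pair(p):
    assert len(p) == 2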
Example #2
def test_dictionary(dict_class):
    assert minimal(dictionaries(keys=integers(), values=text(), dict_class=dict_class)) == dict_class()

    x = minimal(dictionaries(keys=integers(), values=text(), dict_class=dict_class), lambda t: len(t) >= 3)
    assert isinstance(x, dict_class)
    assert set(x.values()) == set((u"",))
    for k in x:
        if k < 0:
            assert k + 1 in x
        if k > 0:
            assert k - 1 in x
Example #3
def _valid_post_requests(
        draw,
        names=text(),
        descriptions=text(),
        job_registration_schemas=dictionaries(text(), text()),
        job_result_schemas=dictionaries(text(), text())
) -> dict:
    return {
        'name': draw(names),
        'description': draw(descriptions),
        'job_registration_schema': draw(job_registration_schemas),
        'job_result_schema': draw(job_result_schemas)
    }
Example #4
def services(
        draw,
        names=text(),
        descriptions=text(),
        registration=dictionaries(text(), text()),
        results=dictionaries(text(), text())
) -> Service:
    return Service.new(
        name=draw(names),
        description=draw(descriptions),
        registration_schema=draw(registration),
        result_schema=draw(results)
    )
Example #5
def http_headers():
    """
    Strategy for generating ``Headers`` populated with random HTTP
    headers.

    This could probably use some more work.
    """
    return dictionaries(
        keys=sampled_from((
            b"accept",
            b"accept-charset",
            b"accept-encoding",
            b"accept-language",
            b"accept-ranges",
            b"age",
            b"allow",
            b"authorization",
            b"cache-control",
            b"connection",
            b"content-encoding",
            b"content-language",
            # XXX The rest, I guess, plus randomly generate some?
        )),
        values=text().map(lambda x: x.encode("utf-8")),
    ).map(
        lambda h: Headers({k: [v] for (k, v) in h.items()})
    )
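
The `.map` calls above transform values after they are drawn: each text value is first encoded to bytes, then the finished dict is repackaged into a `Headers` object. A self-contained sketch of the same dictionaries-then-map pattern, with a plain dict standing in for `Headers`:

from hypothesis import strategies as st

header_like = st.dictionaries(st.text(), st.text()).map(
    # wrap each value in a one-element list, mirroring Headers({k: [v]})
    lambda h: {k: [v] for k, v in h.items()}
)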
Example #6
def jenkins_build_results(inQueue=None, builds=None):
    """Create a strategy for generating Jenkins API information for a job.

    :param strategy inQueue: strategy for the inQueue key, or None to use
        the default.
    :param strategy builds: strategy for populating the builds key, or None
        for the default. The special value `NO_BUILDS` will mean that the
        builds key is not in the resulting dict at all.
    :return strategy: a strategy.
    """
    strats = []
    if inQueue is None:
        inQueue = booleans()
        strats.append(just(pmap()))
    without_builds = fixed_dictionaries(dict(
        inQueue=inQueue))
    if builds is None or builds is NO_BUILDS:
        strats.append(without_builds)
    if builds is None:
        builds = lists(jenkins_builds, average_size=1)
    if builds is not NO_BUILDS:
        with_builds = fixed_dictionaries(dict(
            inQueue=inQueue,
            builds=builds,
            property=dictionaries(
                text(max_size=2), text(max_size=2),
                average_size=1, max_size=2)))
        strats.append(with_builds)
    return one_of(*strats)
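
Here `fixed_dictionaries` builds dicts with exactly the given keys, each value drawn from its own strategy, and `one_of` picks between the alternative shapes on every draw. A simplified sketch of the same composition, with integers standing in for the `jenkins_builds` strategy:

from hypothesis import strategies as st

without_builds = st.fixed_dictionaries({'inQueue': st.booleans()})
with_builds = st.fixed_dictionaries({
    'inQueue': st.booleans(),
    'builds': st.lists(st.integers(), max_size=3),
})
# each drawn dict matches exactly one of the two shapes
jenkins_like = st.one_of(without_builds, with_builds)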
Example #7
def valid_dictionaries(keys, values):
    return dictionaries(keys, values, average_size=5).filter(
        lambda data: (
            sum(len(key.encode('utf-8')) for key in data) <=
            MAX_TOTAL_NAME_LENGTH
        ),
    )
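
Note that the `average_size` argument has since been removed from Hypothesis, so this example needs adjusting on current releases; `.filter` itself is unchanged and simply rejects draws that fail the predicate. A sketch of a modern equivalent (the bound of 255 is a hypothetical stand-in for `MAX_TOTAL_NAME_LENGTH`):

from hypothesis import strategies as st

MAX_TOTAL_NAME_LENGTH = 255  # hypothetical value for illustration

def valid_dictionaries(keys, values):
    return st.dictionaries(keys, values).filter(
        # reject dicts whose UTF-8-encoded key lengths sum past the limit
        lambda data: sum(len(k.encode('utf-8')) for k in data) <= MAX_TOTAL_NAME_LENGTH
    )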
Example #8
def services(
        draw,
        ids=uuids(),
        names=text(),
        descriptions=text(),
        registration_schemas=dictionaries(text(), text()),
        result_schemas=dictionaries(text(), text()),
        are_available=booleans(),
        service_job_lists=job_lists(),
        timeouts=timedeltas()
) -> ServiceInterface:
    return Service(
        draw(ids), draw(names), draw(descriptions),
        draw(registration_schemas), draw(result_schemas),
        draw(are_available), draw(service_job_lists),
        draw(timeouts)
    )
Example #9
def http_query_args():
    """
    Strategy for generating some UTF-8 key/value-list pairs usable as
    query arguments in a request path.
    """
    return dictionaries(
        keys=text().map(lambda x: x.encode("utf-8")),
        values=lists(text().map(lambda x: x.encode("utf-8")), min_size=1),
    )
Example #10
def test_dictionaries_of_fixed_length(n):
    x = set(find(
        dictionaries(integers(), booleans(), min_size=n, max_size=n),
        lambda x: True).keys())

    if not n:
        assert x == set()
    else:
        assert x == set(range(min(x), min(x) + n))
Example #11
File: strategies.py Project: mirca/sunpy
def timedelta(draw):
    """
    Timedelta strategy that limits the maximum timedelta to being positive and
    abs max is about 100 weeks + 100 days + 100 hours + a bit
    """
    keys = st.sampled_from(['days', 'seconds', 'microseconds', 'milliseconds',
                            'minutes', 'hours', 'weeks'])
    values = st.floats(min_value=0, max_value=100)
    return datetime.timedelta(**draw(st.dictionaries(keys, values)))
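
Because the keys come from `sampled_from`, each drawn dict contains some subset of the keyword names, so `timedelta(**draw(...))` is called with a random subset of keyword arguments. The same idea expressed without the `draw` wrapper, as a hedged sketch:

import datetime
from hypothesis import strategies as st

keys = st.sampled_from(['days', 'seconds', 'minutes', 'hours', 'weeks'])
values = st.floats(min_value=0, max_value=100)

# e.g. {'days': 3.5, 'weeks': 1.0} -> timedelta(days=3.5, weeks=1.0)
deltas = st.dictionaries(keys, values).map(lambda kw: datetime.timedelta(**kw))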
Example #12
def jobs(
        draw,
        services=service_generator(),
        parameters=dictionaries(text(), text()),
) -> Job:
    return Job.new(
        draw(services),
        draw(parameters)
    )
Example #13
 def visit_map(self, shape):
     if shape.name in self.active:
         return strategies.fixed_dictionaries({})
     self.active.add(shape.name)
     try:
         return strategies.dictionaries(
             keys=self.visit(shape.key_shape),
             values=self.visit(shape.value_shape),
         )
     finally:
         self.active.remove(shape.name)
Example #14
File: utils.py Project: EdgarChen/servo
def simple_attrs_with_metadata(draw):
    """
    Create a simple attribute with arbitrary metadata.
    """
    c_attr = draw(simple_attrs)
    keys = st.booleans() | st.binary() | st.integers() | st.text()
    vals = st.booleans() | st.binary() | st.integers() | st.text()
    metadata = draw(st.dictionaries(keys=keys, values=vals))

    return attr.ib(c_attr._default, c_attr._validator, c_attr.repr,
                   c_attr.cmp, c_attr.hash, c_attr.init, c_attr.convert,
                   metadata)
Example #15
def jsonComposites(children):
    """
    Creates a Hypothesis strategy that constructs composite
    JSON-serializable objects (e.g., lists).

    @param children: A strategy from which each composite object's
        children will be drawn.

    @return: The composite objects strategy.
    """
    return (st.lists(children) |
            st.dictionaries(st.text(printable), children) |
            st.tuples(children))
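
A helper like `jsonComposites` is typically passed as the extension function of `st.recursive` so that composites can nest to arbitrary depth. A hedged usage sketch:

from string import printable
from hypothesis import strategies as st

json_leaves = st.none() | st.booleans() | st.floats(allow_nan=False) | st.text(printable)

# each recursion level wraps children in a list, dict, or tuple
json_values = st.recursive(
    json_leaves,
    lambda children: st.lists(children)
    | st.dictionaries(st.text(printable), children)
    | st.tuples(children),
)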
Example #16
def monoids():
    base = many_one_of(
        strat.integers(),
        strat.lists(strat.integers()),
        strat.lists(strat.integers()).map(tuple),
        strat.text(),
        strat.integers().map(MonoidProduct),
        strat.dictionaries(strat.integers(), strat.integers()),
    )

    def recurse(substrat):
        return stream_apply_strat(maybes(), substrat)

    return strat.recursive(base, recurse)
Example #17
def test_non_trivial_json():
    json = st.deferred(lambda: st.none() | st.floats() | st.text() | lists | objects)

    lists = st.lists(json)
    objects = st.dictionaries(st.text(), json)

    assert minimal(json) is None

    small_list = minimal(json, lambda x: isinstance(x, list) and x)
    assert small_list == [None]

    x = minimal(json, lambda x: isinstance(x, dict) and isinstance(x.get(""), list))

    assert x == {"": []}
Example #18
 def export(cls):
     """
     Strategy that generates :py:class:`scality_manila_utils.export.Export`.
     """
     export_point = cls.path()
     host = cls.host()
     options = sets(cls.options(), average_size=3)
     clients = dictionaries(
         keys=host,
         values=options,
         min_size=1,
         average_size=5
     )
     return builds(Export, export_point, clients)
Example #19
def TimeDelta(draw):
    """
    Timedelta strategy that limits the maximum timedelta to being positive and
    abs max is about 10 weeks + 10 days + 10 hours + 10 minutes + a bit
    """
    keys = st.sampled_from(['weeks', 'days', 'hours', 'minutes', 'seconds'])
    values = st.floats(min_value=1, max_value=10)
    delta = datetime.timedelta(**draw(st.dictionaries(keys, values)))
    delta = astropy.time.TimeDelta(delta, format='datetime')

    # We don't want a 0 timedelta
    assume(delta.sec > 0)

    return delta
Example #20
def type_to_strat(x, opts): # type: (type) -> SearchStrategy
   '''
   Given a type, return a strategy which yields a value of that type. Types may be complex: Union, NamedTuple, etc.
   For more information, see https://docs.python.org/3/library/typing.html
   Usage:
   >>> type_to_strat(Union[int, str]).example()
   . . . 3
   '''
   recur = lambda y: type_to_strat(y, opts)
   if x in primitives:
       prim =  primitives[x].filter(opts.get(x, lambda x: x))
       return prim
   elif hasattr(x, '_fields'):# NamedTuple isn't a type, it's a function
   #elif isinstance(x, Callable): #this catches List[T] for some reason
       name = x.__name__
       fts = OrderedDict(x._field_types)
       vals = map(recur, fts.values())
       # `NamedTuple` is actually a ... `namedtuple` itself
       toArgDict = lambda xs: dict(zip(fts.keys(), xs))
       return st.tuples(*vals).map(lambda ys: x(**toArgDict(ys)))
   elif issubclass(x, Dict):
       return st.dictionaries(*map(recur, x.__parameters__))
   elif issubclass(x, Tuple):
       if x.__tuple_use_ellipsis__: # variable length tuple
           element_type = x.__tuple_params__[0]
           return recur(List[element_type]).map(tuple)
       return st.tuples(*map(recur, x.__tuple_params__))
   elif issubclass(x, Union):
       return reduce(operator.ior, map(recur, x.__union_params__))
   elif issubclass(x, Optional):
       # Optional[X] is equivalent to Union[X, type(None)]. second param is always Nonetype.
       value = x.__union_params__[0]
       return (recur(value) | st.none()) # type: SearchStrategy
   else:
       element_type = recur(x.__parameters__[0])
       if issubclass(x, list):
           return st.lists(element_type)
       elif issubclass(x, set):
           return st.sets(element_type)
       elif issubclass(x, Sequence):
           anySizeTuple = recur(Tuple[element_type,...])
           return st.sets(element_type) | st.lists(element_type) | anySizeTuple
       elif issubclass(x, Generator):
           toGen = lambda xs: (x for x in xs) # type: Callable[[Iterable[T]], Generator[T]]
           return recur(List[element_type]).map(toGen)
       # not sure how to create an Iterable (it doesn't have an `__next__` method)
       elif issubclass(x, Iterator) or issubclass(x, Iterable):
           return recur(List[element_type]).map(iter)
       else:
           raise ValueError("Could not find strategy for type %s" % x)
Example #21
def objects(elements=None, *,
            required_fields=None,
            optional_fields=None,
            min_size=None,
            average_size=None,
            max_size=None):
    """Returns a strategy that generates dicts of specified `elements`.

    The `elements` must be valid Hypothesis strategy. The keys are ensured to
    be string only as JSON requires.

    While choice of `elements` left here for user side, it's not recommended to
    use anything that produces non-serializable to JSON values.

    Also possible to ensure that some fields with values generated by a certain
    strategy are always exists (`required_fields`) or just optional
    (`optional_fields`).
    """
    def check_type(varname, var, expected):
        if not isinstance(var, expected):
            raise TypeError('{} must be {}, got {}'.format(
                varname, expected, type(var)))

    acc = []
    if required_fields:
        check_type('required_fields', required_fields, dict)
        for key in required_fields:
            check_type('required field name', key, str)
        acc.append(st.fixed_dictionaries(required_fields))

    if optional_fields:
        check_type('optional_fields', optional_fields, dict)
        for key in optional_fields:
            check_type('optional field name', key, str)
        acc.append(st.sets(st.sampled_from(optional_fields)).flatmap(
            lambda keys: st.fixed_dictionaries({key: optional_fields[key]
                                                for key in keys})))
    if elements:
        acc.append(st.dictionaries(strings(), elements,
                                   min_size=min_size,
                                   average_size=average_size,
                                   max_size=max_size))

    if not acc:
        raise RuntimeError('object must have at least one strategy for fields')

    return st.tuples(*reversed(acc)).map(
        lambda s: reduce(lambda a, d: dict(a, **d), s))
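
A hedged usage sketch of this `objects` helper (the field names are hypothetical, and it assumes the module's own `strings()` helper is available):

from hypothesis import strategies as st

docs = objects(
    elements=st.text(),                     # free-form string fields
    required_fields={'id': st.integers()},  # always present
    optional_fields={'tag': st.text()},     # present only in some draws
)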
Example #22
def simple_attrs_with_metadata(draw):
    """
    Create a simple attribute with arbitrary metadata.
    """
    c_attr = draw(simple_attrs)
    keys = st.booleans() | st.binary() | st.integers() | st.text()
    vals = st.booleans() | st.binary() | st.integers() | st.text()
    metadata = draw(st.dictionaries(
        keys=keys, values=vals, min_size=1, max_size=5))

    return attr.ib(
        default=c_attr._default,
        validator=c_attr._validator,
        repr=c_attr.repr,
        cmp=c_attr.cmp,
        hash=c_attr.hash,
        init=c_attr.init,
        metadata=metadata,
        type=None,
        converter=c_attr.converter,
    )
Example #23
def test_produces_dictionaries_of_at_least_minimum_size():
    t = minimal(
        ds.dictionaries(ds.booleans(), ds.integers(), min_size=2), lambda x: True
    )
    assert t == {False: 0, True: 0}
Example #24
def json_strat(depth=5):
    return st.recursive(st.floats() | st.booleans() | st.text() | st.none(),
                        lambda children: st.dictionaries(st.text(), children),
                        max_leaves=depth)
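
With this shape every generated value is either a scalar leaf or a (possibly nested) dict of such values, and `max_leaves` caps how far the recursion can grow. A small hedged usage sketch:

from hypothesis import given

@given(json_strat(depth=3))
def test_json_strat_shape(value):
    # leaves are floats, booleans, text, or None; recursion only adds dicts
    assert value is None or isinstance(value, (float, bool, str, dict))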
Example #25
def random_dict_variable_homogeneous_value(**kwargs):
    """Return a strategy which generates a random dictionary of variable name and value"""
    return primitive_types.flatmap(
        lambda s: hs.dictionaries(valid_identifier(), s(), **kwargs))
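
`flatmap` first draws a value-type strategy and then builds a dictionary from it, so all values within any one generated dict share a type. A self-contained sketch of the same pattern, with `sampled_from` standing in for the `primitive_types` and `valid_identifier` helpers:

from hypothesis import strategies as st

homogeneous = st.sampled_from([st.integers(), st.text(), st.booleans()]).flatmap(
    lambda s: st.dictionaries(st.text(min_size=1), s)
)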
Example #26

@pytest.mark.parametrize("tree_type", [AVLTree, RBTree])
@given(dict_and_key())  # pylint: disable=no-value-for-parameter
def test_pop(tree_type, givens):
    items, test_key = givens
    tree = AVLTree()

    for k, v in items.items():
        tree[k] = v

    assert tree.pop(test_key) == items.pop(test_key)


@pytest.mark.parametrize("tree_type", [AVLTree, RBTree])
@given(st.dictionaries(st.integers(), st.uuids()))
def test_iter(tree_type, items):
    keys = sorted(items.keys())
    tree = AVLTree()

    for k, v in items.items():
        assert tree.insert(k, v) is None

    ret = list(tree.items())

    assert len(ret) == len(keys)
    for k1, kv in zip(keys, ret):
        assert k1 == kv[0]
        assert items[k1] == kv[1]

Example #27
    return builds(ABC, x, y, z)


standard_types = [
    lists(none(), max_size=0),
    tuples(),
    sets(none(), max_size=0),
    frozensets(none(), max_size=0),
    fixed_dictionaries({}),
    abc(booleans(), booleans(), booleans()),
    abc(booleans(), booleans(), integers()),
    fixed_dictionaries({
        "a": integers(),
        "b": booleans()
    }),
    dictionaries(booleans(), integers()),
    dictionaries(text(), booleans()),
    one_of(integers(), tuples(booleans())),
    sampled_from(range(10)),
    one_of(just("a"), just("b"), just("c")),
    sampled_from(("a", "b", "c")),
    integers(),
    integers(min_value=3),
    integers(min_value=(-(2**32)), max_value=(2**64)),
    floats(),
    floats(min_value=-2.0, max_value=3.0),
    floats(),
    floats(min_value=-2.0),
    floats(),
    floats(max_value=-0.0),
    floats(),
Example #28
class TestClusterMaster(BaseUnitTestCase):
    _PAGINATION_OFFSET = 0
    _PAGINATION_LIMIT = 5
    _PAGINATION_MAX_LIMIT = 10
    _NUM_BUILDS = _NUM_SUBJOBS = _NUM_ATOMS = 20

    def setUp(self):
        super().setUp()
        self.patch('app.util.fs.create_dir')
        self.patch('app.util.fs.async_delete')
        self.patch('os.makedirs')
        self.mock_slave_allocator = self.patch(
            'app.master.cluster_master.SlaveAllocator').return_value
        self.mock_scheduler_pool = self.patch(
            'app.master.cluster_master.BuildSchedulerPool').return_value

        # Two threads are run every time we start up the ClusterMaster. We redirect the calls
        # to `ThreadPoolExecutor.submit` through a mock proxy so we can capture events.
        self.thread_pool_executor = ThreadPoolExecutor(max_workers=2)
        self._thread_pool_executor_cls = self.patch(
            'app.master.cluster_master.ThreadPoolExecutor')
        self._thread_pool_executor_cls.return_value.submit.side_effect = \
            self.thread_pool_executor.submit

        Configuration['pagination_offset'] = self._PAGINATION_OFFSET
        Configuration['pagination_limit'] = self._PAGINATION_LIMIT
        Configuration['pagination_max_limit'] = self._PAGINATION_MAX_LIMIT
        Configuration['database_name'] = TEST_DB_NAME
        Configuration['database_url'] = TEST_DB_URL
        Connection.create(Configuration['database_url'])
        BuildStore._cached_builds_by_id.clear()

    def tearDown(self):
        super().tearDown()
        self.thread_pool_executor.shutdown()

    def tearDownClass():
        """Delete testing database after we're done"""
        remove(TEST_DB_NAME)

    @genty_dataset(
        slave_id_specified=({
            'slave_id': 400
        }, ),
        slave_url_specified=({
            'slave_url': 'michelangelo.turtles.gov'
        }, ),
    )
    def test_get_slave_raises_exception_on_slave_not_found(
            self, get_slave_kwargs):
        master = ClusterMaster()
        master.connect_slave('raphael.turtles.gov', 10)
        master.connect_slave('leonardo.turtles.gov', 10)
        master.connect_slave('donatello.turtles.gov', 10)

        with self.assertRaises(ItemNotFoundError):
            master.get_slave(**get_slave_kwargs)

    @genty_dataset(
        both_arguments_specified=({
            'slave_id': 1,
            'slave_url': 'raphael.turtles.gov'
        }, ),
        neither_argument_specified=({}, ),
    )
    def test_get_slave_raises_exception_on_invalid_arguments(
            self, get_slave_kwargs):
        master = ClusterMaster()
        master.connect_slave('raphael.turtles.gov', 10)

        with self.assertRaises(ValueError):
            master.get_slave(**get_slave_kwargs)

    def test_get_slave_returns_expected_value_given_valid_arguments(self):
        master = ClusterMaster()
        master.connect_slave('raphael.turtles.gov', 10)
        master.connect_slave('leonardo.turtles.gov', 10)
        master.connect_slave('donatello.turtles.gov', 10)

        actual_slave_by_id = master.get_slave(slave_id=2)
        actual_slave_by_url = master.get_slave(
            slave_url='leonardo.turtles.gov')

        self.assertEqual(
            2, actual_slave_by_id.id,
            'Retrieved slave should have the same id as requested.')
        self.assertEqual(
            'leonardo.turtles.gov', actual_slave_by_url.url,
            'Retrieved slave should have the same url as requested.')

    def test_connect_slave_adds_new_slave_if_slave_never_connected_before(
            self):
        master = ClusterMaster()

        master.connect_slave('never-before-seen.turtles.gov', 10)

        self.assertEqual(
            1, len(master.all_slaves_by_id()),
            'Exactly one slave should be registered with the master.')
        self.assertIsNotNone(
            master.get_slave(slave_id=None,
                             slave_url='never-before-seen.turtles.gov'),
            'Registered slave does not have the expected url.')

    def test_connect_slave_with_existing_dead_slave_creates_new_alive_instance(
            self):
        master = ClusterMaster()
        master.connect_slave('existing-slave.turtles.gov', 10)
        existing_slave = master.get_slave(
            slave_id=None, slave_url='existing-slave.turtles.gov')
        existing_slave.set_is_alive(False)
        existing_slave_id = existing_slave.id

        connect_response = master.connect_slave('existing-slave.turtles.gov',
                                                10)
        new_slave = master._all_slaves_by_url.get('existing-slave.turtles.gov')

        self.assertNotEqual(
            str(existing_slave_id), connect_response['slave_id'],
            'The re-connected slave should have generated a new slave id.')
        self.assertTrue(
            new_slave.is_alive(use_cached=True),
            'The new slave should have been marked as alive once instantiated.'
        )
        self.assertEqual(
            2, self.mock_slave_allocator.add_idle_slave.call_count,
            'Expected slave to be added to the idle slaves list.')

    def test_connect_slave_with_existing_slave_running_build_cancels_build(
            self):
        master = ClusterMaster()
        master.connect_slave('running-slave.turtles.gov', 10)
        build_mock = MagicMock(spec_set=Build)
        BuildStore._cached_builds_by_id[1] = build_mock
        existing_slave = master.get_slave(
            slave_id=None, slave_url='running-slave.turtles.gov')
        existing_slave.current_build_id = 1

        master.connect_slave('running-slave.turtles.gov', 10)

        self.assertTrue(build_mock.cancel.called,
                        'The build was not cancelled.')

    def test_update_build_with_valid_params_succeeds(self):
        build_id = 1
        update_params = {'key': 'value'}
        master = ClusterMaster()
        build = Mock()
        BuildStore._cached_builds_by_id[build_id] = build
        build.validate_update_params = Mock(return_value=(True, update_params))
        build.update_state = Mock()

        success, response = master.handle_request_to_update_build(
            build_id, update_params)

        build.update_state.assert_called_once_with(update_params)
        self.assertTrue(success, "Update build should return success")
        self.assertEqual(response, {}, "Response should be empty")

    def test_update_build_with_bad_build_id_fails(self):
        build_id = 1
        invalid_build_id = 0
        update_params = {'key': 'value'}
        master = ClusterMaster()
        build = Mock()
        BuildStore._cached_builds_by_id[build_id] = build
        build.validate_update_params = Mock(return_value=(True, update_params))
        build.update_state = Mock()

        with self.assertRaises(ItemNotFoundError):
            master.handle_request_to_update_build(invalid_build_id,
                                                  update_params)

    def test_updating_slave_to_disconnected_state_should_mark_slave_as_dead(
            self):
        master = ClusterMaster()
        slave_url = 'raphael.turtles.gov'
        master.connect_slave(slave_url, num_executors=10)
        slave = master.get_slave(slave_url=slave_url)
        self.assertTrue(slave.is_alive())

        master.handle_slave_state_update(slave, SlaveState.DISCONNECTED)

        self.assertFalse(slave.is_alive())

    def test_updating_slave_to_disconnected_state_should_reset_slave_current_build_id(
            self):
        master = ClusterMaster()
        slave_url = 'raphael.turtles.gov'
        master.connect_slave(slave_url, num_executors=10)
        slave = master.get_slave(slave_url=slave_url)
        slave.current_build_id = 4

        master.handle_slave_state_update(slave, SlaveState.DISCONNECTED)

        self.assertIsNone(slave.current_build_id)

    def test_updating_slave_to_setup_completed_state_should_tell_build_to_begin_subjob_execution(
            self):
        master = ClusterMaster()
        fake_build = MagicMock(spec_set=Build)
        master.get_build = MagicMock(return_value=fake_build)
        slave_url = 'raphael.turtles.gov'
        master.connect_slave(slave_url, 10)
        slave = master.get_slave(slave_url=slave_url)
        mock_scheduler = self.mock_scheduler_pool.get(fake_build)
        scheduler_begin_event = Event()
        mock_scheduler.begin_subjob_executions_on_slave.side_effect = lambda **_: scheduler_begin_event.set(
        )

        master.handle_slave_state_update(slave, SlaveState.SETUP_COMPLETED)

        was_called = scheduler_begin_event.wait(timeout=5)
        self.assertTrue(
            was_called,
            'scheduler.begin_subjob_executions_on_slave should be called in response '
            'to slave setup completing.')
        _, call_kwargs = mock_scheduler.begin_subjob_executions_on_slave.call_args
        self.assertEqual(call_kwargs.get('slave'), slave)

    def test_updating_slave_to_shutdown_should_call_slave_set_shutdown_mode(
            self):
        master = ClusterMaster()
        slave_url = 'raphael.turtles.gov'
        master.connect_slave(slave_url, 10)
        slave = master.get_slave(slave_url=slave_url)
        slave.set_shutdown_mode = Mock()

        master.handle_slave_state_update(slave, SlaveState.SHUTDOWN)

        slave.set_shutdown_mode.assert_called_once_with()

    def test_updating_slave_to_nonexistent_state_should_raise_bad_request_error(
            self):
        master = ClusterMaster()
        slave_url = 'raphael.turtles.gov'
        master.connect_slave(slave_url, 10)
        slave = master.get_slave(slave_url=slave_url)

        with self.assertRaises(BadRequestError):
            master.handle_slave_state_update(slave, 'NONEXISTENT_STATE')

    def test_handle_result_reported_from_slave_when_build_is_canceled(self):
        build_id = 1
        slave_url = "url"
        build = Build(BuildRequest({}))
        self.patch('app.master.build.util')
        build.generate_project_type()
        build.cancel()

        self.patch_object(build, '_handle_subjob_payload')
        self.patch_object(build, '_mark_subjob_complete')

        master = ClusterMaster()
        BuildStore._cached_builds_by_id[build_id] = build
        master._all_slaves_by_url[slave_url] = Mock()
        mock_scheduler = self.mock_scheduler_pool.get(build)

        master.handle_result_reported_from_slave(slave_url, build_id, 1)

        self.assertEqual(build._handle_subjob_payload.call_count, 1,
                         "Canceled builds should "
                         "handle payload")
        self.assertEqual(
            build._mark_subjob_complete.call_count, 1,
            "Canceled builds should mark "
            "their subjobs complete")
        self.assertTrue(
            mock_scheduler.execute_next_subjob_or_free_executor.called)

    def test_exception_raised_during_complete_subjob_does_not_prevent_slave_teardown(
            self):
        slave_url = 'raphael.turtles.gov'
        mock_build = Mock(spec_set=Build,
                          build_id=lambda: 777,
                          is_finished=False)
        mock_build.complete_subjob.side_effect = [RuntimeError('Write failed')]

        master = ClusterMaster()
        BuildStore._cached_builds_by_id[mock_build.build_id()] = mock_build
        master._all_slaves_by_url[slave_url] = Mock()
        mock_scheduler = self.mock_scheduler_pool.get(mock_build)

        with self.assertRaisesRegex(RuntimeError, 'Write failed'):
            master.handle_result_reported_from_slave(slave_url,
                                                     mock_build.build_id(),
                                                     subjob_id=888)

        self.assertEqual(
            mock_scheduler.execute_next_subjob_or_free_executor.call_count, 1)

    @given(dictionaries(text(), text()))
    def test_handle_request_for_new_build_does_not_raise_exception(
            self, build_params):
        master = ClusterMaster()
        master.handle_request_for_new_build(build_params)

    @given(integers(), dictionaries(text(), text()))
    def test_handle_request_to_update_build_does_not_raise_exception(
            self, build_id, update_params):
        master = ClusterMaster()
        BuildStore._cached_builds_by_id = {build_id: Build({})}
        master.handle_request_to_update_build(build_id, update_params)

    @genty_dataset(
        # No params simulates a v1 request
        no_params=(None, None, 1, 0 + _NUM_BUILDS),
        # Params simulate a v2 request
        offset_param=(3, _PAGINATION_LIMIT, 3 + 1, 3 + _PAGINATION_LIMIT),
        limit_param=(_PAGINATION_OFFSET, 5, _PAGINATION_OFFSET + 1,
                     _PAGINATION_OFFSET + 5),
        offset_and_limit_params=(3, 5, 3 + 1, 3 + 5),
        low_limit=(_PAGINATION_OFFSET, 2, _PAGINATION_OFFSET + 1,
                   _PAGINATION_OFFSET + 2),
        max_limit=(_PAGINATION_OFFSET, _PAGINATION_MAX_LIMIT,
                   _PAGINATION_OFFSET + 1,
                   _PAGINATION_OFFSET + _PAGINATION_MAX_LIMIT),
        too_high_offset=(1000, _PAGINATION_LIMIT, None, None),
    )
    def test_builds_with_pagination_request(
        self,
        offset: Optional[int],
        limit: Optional[int],
        expected_first_build_id: int,
        expected_last_build_id: int,
    ):
        master = ClusterMaster()
        # Create 20 mock builds with ids 1 to 20
        for build_id in range(1, self._NUM_BUILDS + 1):
            build_mock = Mock(spec=Build)
            build_mock.build_id = build_id
            BuildStore._cached_builds_by_id[build_id] = build_mock

        # Normally `get_builds` counts the number of builds in the database, but since we're
        # directly adding builds into the cache here, we want to count those instead.
        self.patch('app.database.build_store.BuildStore.count_all_builds',
                   autospec=False).return_value = len(
                       BuildStore._cached_builds_by_id)
        requested_builds = master.get_builds(offset, limit)

        id_of_first_build = requested_builds[0].build_id if len(
            requested_builds) else None
        id_of_last_build = requested_builds[-1].build_id if len(
            requested_builds) else None
        num_builds = len(requested_builds)

        self.assertEqual(id_of_first_build, expected_first_build_id,
                         'Received the wrong first build from request')
        self.assertEqual(id_of_last_build, expected_last_build_id,
                         'Received the wrong last build from request')
        if offset is not None and limit is not None:
            self.assertLessEqual(num_builds, self._PAGINATION_MAX_LIMIT,
                                 'Received too many builds from request')

    @genty_dataset(
        # No params simulates a v1 request
        no_params=(None, None, 1, 0 + _NUM_SUBJOBS),
        # Params simulate a v2 request
        offset_param=(3, _PAGINATION_LIMIT, 3 + 1, 3 + _PAGINATION_LIMIT),
        limit_param=(_PAGINATION_OFFSET, 5, _PAGINATION_OFFSET + 1,
                     _PAGINATION_OFFSET + 5),
        offset_and_limit_params=(3, 5, 3 + 1, 3 + 5),
        low_limit=(_PAGINATION_OFFSET, 2, _PAGINATION_OFFSET + 1,
                   _PAGINATION_OFFSET + 2),
        max_limit=(_PAGINATION_OFFSET, _PAGINATION_MAX_LIMIT,
                   _PAGINATION_OFFSET + 1,
                   _PAGINATION_OFFSET + _PAGINATION_MAX_LIMIT),
        too_high_offset=(1000, _PAGINATION_LIMIT, None, None),
    )
    def test_subjobs_with_pagination_request(
        self,
        offset: Optional[int],
        limit: Optional[int],
        expected_first_subjob_id: int,
        expected_last_subjob_id: int,
    ):
        build = Build(BuildRequest({}))
        # Create 20 mock subjobs with ids 1 to 20
        for subjob_id in range(1, self._NUM_SUBJOBS + 1):
            subjob_mock = Mock(spec=Subjob)
            subjob_mock.subjob_id = subjob_id
            build._all_subjobs_by_id[subjob_id] = subjob_mock

        requested_subjobs = build.get_subjobs(offset, limit)

        id_of_first_subjob = requested_subjobs[0].subjob_id if len(
            requested_subjobs) else None
        id_of_last_subjob = requested_subjobs[-1].subjob_id if len(
            requested_subjobs) else None
        num_subjobs = len(requested_subjobs)

        self.assertEqual(id_of_first_subjob, expected_first_subjob_id,
                         'Received the wrong first subjob from request')
        self.assertEqual(id_of_last_subjob, expected_last_subjob_id,
                         'Received the wrong last subjob from request')
        if offset is not None and limit is not None:
            self.assertLessEqual(num_subjobs, self._PAGINATION_MAX_LIMIT,
                                 'Received too many subjobs from request')

    @genty_dataset(
        # No params simulates a v1 request
        no_params=(None, None, 1, 0 + _NUM_ATOMS),
        # Params simulate a v2 request
        offset_param=(3, _PAGINATION_LIMIT, 3 + 1, 3 + _PAGINATION_LIMIT),
        limit_param=(_PAGINATION_OFFSET, 5, _PAGINATION_OFFSET + 1,
                     _PAGINATION_OFFSET + 5),
        offset_and_limit_params=(3, 5, 3 + 1, 3 + 5),
        low_limit=(_PAGINATION_OFFSET, 2, _PAGINATION_OFFSET + 1,
                   _PAGINATION_OFFSET + 2),
        max_limit=(_PAGINATION_OFFSET, _PAGINATION_MAX_LIMIT,
                   _PAGINATION_OFFSET + 1,
                   _PAGINATION_OFFSET + _PAGINATION_MAX_LIMIT),
        too_high_offset=(1000, _PAGINATION_LIMIT, None, None),
    )
    def test_atoms_with_pagination_request(
        self,
        offset: Optional[int],
        limit: Optional[int],
        expected_first_atom_id: int,
        expected_last_atom_id: int,
    ):
        # Create 20 mock atoms with ids 1 to 20
        atoms = []
        for atom_id in range(1, self._NUM_ATOMS + 1):
            atom_mock = Mock(spec=Atom)
            atom_mock.id = atom_id
            atoms.append(atom_mock)

        build_id = 1
        subjob_id = 1
        project_type = None
        job_config = None
        subjob_atoms = atoms
        subjob = Subjob(build_id, subjob_id, project_type, job_config, atoms)

        requested_atoms = subjob.get_atoms(offset, limit)

        id_of_first_atom = requested_atoms[0].id if len(
            requested_atoms) else None
        id_of_last_atom = requested_atoms[-1].id if len(
            requested_atoms) else None
        num_atoms = len(requested_atoms)

        self.assertEqual(id_of_first_atom, expected_first_atom_id,
                         'Received the wrong first atom from request')
        self.assertEqual(id_of_last_atom, expected_last_atom_id,
                         'Received the wrong last atom from request')
        if offset is not None and limit is not None:
            self.assertLessEqual(num_atoms, self._PAGINATION_MAX_LIMIT,
                                 'Received too many atoms from request')
Example #29
    }, hostnames, exact_image_names, image_tags)
image_obj_registry_repository_values = strats.builds(
    lambda r, n, t: {
        'image': {
            'registry': r,
            'repository': '%s:%s' % (n, t)
        },
        '_containers': [{
            kubeyaml.FHR_CONTAINER: '%s/%s:%s' % (r, n, t)
        }]
    }, hostnames, exact_image_names, image_tags)

# One of the above
toplevel_image_values = image_only_values | image_tag_values | image_registry_values | image_registry_tag_values | image_obj_values | image_obj_registry_tag_values | image_obj_registry_repository_values
# Some of the above, in fields
named_image_values = strats.dictionaries(
    keys=dns_labels, values=toplevel_image_values).map(lift_containers)
# Combo of top-level image, and images in subfields
all_image_values = strats.builds(combine_containers,
                                 strats.just(None) | toplevel_image_values,
                                 named_image_values)

values_noise = strats.deferred(lambda: strats.dictionaries(
    keys=dns_labels,
    values=values_noise | strats.integers() | strats.lists(
        values_noise) | strats.booleans() | strats.text(printable),
    max_size=3))


@settings(suppress_health_check=[HealthCheck.too_slow])
@given(all_image_values, values_noise)
def test_extract_custom_containers(image_values, noise):
Example #30
def test_minimize_multi_key_dicts():
    assert minimal(dictionaries(keys=booleans(), values=booleans()), bool) == {
        False: False
    }
Example #31
def extend_fn(children):
    lists_st = st.lists(children)
    dicts_st = st.dictionaries(encodable_text, children)
    return lists_st | dicts_st
Example #32
quoted = (
    st.none()
    | st.booleans()
    | st.integers()
    | st.floats(allow_nan=False)
    | st.complex_numbers(allow_nan=False)
    | st.binary()  # bytes
)
literals = st.recursive(
    # strings, bytes, numbers, tuples, lists, dicts, sets, booleans, and None
    quoted,
    lambda children: st.lists(children)
    | st.sets(quoted)
    | st.builds(tuple, st.lists(children))
    | st.dictionaries(quoted, children),
)


class TestCompileGeneral(TestCase):
    @given(
        literals
        | st.dates()
        | st.datetimes()
        | st.decimals(allow_nan=False)
        | st.fractions()
        | st.timedeltas()
        | st.times()
        | st.uuids()
    )
    def test_compile_pickle(self, form):
Example #33
def test_validates_min_size_for_sets():
    ds.sets(ds.booleans(), min_size=2)
    with pytest.raises(InvalidArgument):
        ds.sets(ds.booleans(), min_size=3).example()


def test_produces_dictionaries_of_at_least_minimum_size():
    t = find(
        ds.dictionaries(ds.booleans(), ds.integers(), min_size=2),
        lambda x: True)
    assert t == {False: 0, True: 0}


@given(
    ds.dictionaries(ds.integers(), ds.integers(), max_size=5),
    settings=Settings(max_examples=50))
def test_dictionaries_respect_size(d):
    assert len(d) <= 5


@given(
    ds.dictionaries(ds.integers(), ds.integers(), max_size=0),
    settings=Settings(max_examples=50))
def test_dictionaries_respect_zero_size(d):
    assert len(d) == 0


@given(
    ds.lists(ds.none(), max_size=5)
)
Example #34
    for is_last_element, var in core.signal_last(l):
        if is_last_element:
            assert var == last


def test_signal_last_raises_value_error_with_no_length():
    """It raises value error when no length"""
    with pytest.raises(ValueError):
        items: List[Any] = []
        for i in core.signal_last(items):
            print(i)


@given(
    strategies.dictionaries(keys=strategies.text(min_size=1),
                            values=strategies.integers(),
                            min_size=1))
def test_signal_last_dictionary(d):
    """It returns last element for dictionary"""
    *_, last = d
    for is_last_element, var in core.signal_last(d.items()):
        if is_last_element:
            key, value = var
            assert key == last


@given(
    strategies.dictionaries(keys=strategies.text(min_size=1),
                            values=strategies.integers(),
                            min_size=1))
def test_signal_last_truth(d):
Example #35
def test_find_dictionary():
    smallest = minimal(
        dictionaries(keys=integers(), values=integers()),
        lambda xs: any(kv[0] > kv[1] for kv in xs.items()),
    )
    assert len(smallest) == 1
Example #36
class TestPropertyBasedSuite:
    @pytest.mark.skip('example')
    @settings(deadline=None, max_examples=100)
    @given(
        var_bin=strategies.binary(
            5, 25).filter(lambda x: x != b'\x00\x00\x00\x00\x00'),
        var_char=strategies.characters('S').filter(
            lambda x: x not in ['@', '#', '$']),
        var_text=strategies.text(
            ascii_letters, min_size=10,
            max_size=10).map(lambda x: x.lower()),  # <<< map
        var_rec=strategies.recursive(
            strategies.integers()
            | strategies.floats(),
            lambda children: strategies.lists(children, min_size=3)
            | strategies.dictionaries(
                strategies.text(printable), children, min_size=3),
            max_leaves=10),
        var_dt_lists=strategies.integers(1, 5).flatmap(
            lambda x: strategies.lists(strategies.datetimes(), x, x)))
    @pytest.mark.asyncio
    async def test_case_strategies(self, var_bin, var_char, var_text, var_rec,
                                   var_dt_lists):
        print()
        print(var_bin)
        print(var_char)
        print(var_text)
        print(var_rec)
        print(var_dt_lists)
        print('-' * 25)

    # bad behaviour with the verkey field - hypothesis re-sends txns with the same verkey, which causes rejects
    @settings(deadline=None, max_examples=250, verbosity=Verbosity.debug)
    @given(reqid=strategies.integers(min_value=1, max_value=max_size),
           dest=strategies.text(ascii_letters, min_size=16, max_size=16),
           alias=strategies.text(min_size=1, max_size=10000))
    @pytest.mark.asyncio
    async def test_case_nym(self, pool_handler, wallet_handler,
                            get_default_trustee, reqid, dest, alias):
        trustee_did, trustee_vk = get_default_trustee
        req = {
            'protocolVersion': 2,
            'reqId': reqid,
            'identifier': trustee_did,
            'operation': {
                'type': '1',
                'dest': base58.b58encode(dest).decode(),
                'role': '201',
                'alias': alias
            }
        }
        print(req)
        res = json.loads(await ledger.sign_and_submit_request(
            pool_handler, wallet_handler, trustee_did, json.dumps(req)))
        print(res)
        assert res['op'] == 'REPLY'

    @settings(deadline=None, max_examples=250)
    @given(reqid=strategies.integers(min_value=1, max_value=max_size),
           xhash=strategies.text().map(
               lambda x: hashlib.sha256(x.encode()).hexdigest()),
           key=strategies.text(min_size=1, alphabet=printable),
           value=strategies.text(min_size=1, alphabet=printable),
           enc=strategies.text(min_size=1))
    @pytest.mark.asyncio
    async def test_case_attrib(self, pool_handler, wallet_handler,
                               get_default_trustee, reqid, xhash, key, value,
                               enc):
        trustee_did, trustee_vk = get_default_trustee
        target_did, target_vk = await did.create_and_store_my_did(
            wallet_handler, '{}')
        res = await send_nym(pool_handler, wallet_handler, trustee_did,
                             target_did, target_vk)
        assert res['op'] == 'REPLY'
        req_base = {
            'protocolVersion': 2,
            'identifier': target_did,
            'operation': {
                'type': '100',
                'dest': target_did
            }
        }

        req1 = copy.deepcopy(req_base)
        req1['reqId'] = reqid + 1
        req1['operation']['hash'] = xhash
        res1 = json.loads(await ledger.sign_and_submit_request(
            pool_handler, wallet_handler, target_did, json.dumps(req1)))
        print(req1)
        print(res1)
        assert res1['op'] == 'REPLY'

        req2 = copy.deepcopy(req_base)
        req2['reqId'] = reqid + 2
        req2['operation']['raw'] = json.dumps({key: value})
        res2 = json.loads(await ledger.sign_and_submit_request(
            pool_handler, wallet_handler, target_did, json.dumps(req2)))
        print(req2)
        print(res2)
        assert res2['op'] == 'REPLY'

        req3 = copy.deepcopy(req_base)
        req3['reqId'] = reqid + 3
        req3['operation']['enc'] = enc
        res3 = json.loads(await ledger.sign_and_submit_request(
            pool_handler, wallet_handler, target_did, json.dumps(req3)))
        print(req3)
        print(res3)
        assert res3['op'] == 'REPLY'

    @settings(deadline=None, max_examples=250)
    @given(reqid=strategies.integers(min_value=1, max_value=max_size),
           version=strategies.floats(min_value=0.1, max_value=999.999),
           name=strategies.text(min_size=1),
           attrs=strategies.lists(strategies.text(min_size=1),
                                  min_size=1,
                                  max_size=125))
    @pytest.mark.asyncio
    async def test_case_schema(self, pool_handler, wallet_handler,
                               get_default_trustee, reqid, version, name,
                               attrs):
        trustee_did, trustee_vk = get_default_trustee
        creator_did, creator_vk = await did.create_and_store_my_did(
            wallet_handler, '{}')
        res = await send_nym(pool_handler, wallet_handler, trustee_did,
                             creator_did, creator_vk, None, 'TRUSTEE')
        assert res['op'] == 'REPLY'
        req = {
            'protocolVersion': 2,
            'reqId': reqid,
            'identifier': creator_did,
            'operation': {
                'type': '101',
                'data': {
                    'version': str(version),
                    'name': name,
                    'attr_names': attrs
                }
            }
        }
        res = json.loads(await ledger.sign_and_submit_request(
            pool_handler, wallet_handler, creator_did, json.dumps(req)))
        print(req)
        print(res)
        assert res['op'] == 'REPLY'

    @settings(deadline=None, max_examples=250, verbosity=Verbosity.verbose)
    @given(reqid=strategies.integers(min_value=1, max_value=max_size),
           tag=strategies.text(printable, min_size=1),
           primary=strategies.recursive(
               strategies.dictionaries(strategies.text(printable, min_size=1),
                                       strategies.text(printable, min_size=1),
                                       min_size=1,
                                       max_size=3),
               lambda x: strategies.dictionaries(strategies.text(printable,
                                                                 min_size=1),
                                                 x,
                                                 min_size=1,
                                                 max_size=3)))
    @pytest.mark.asyncio
    async def test_case_cred_def(self, pool_handler, wallet_handler,
                                 get_default_trustee, reqid, tag, primary):
        trustee_did, trustee_vk = get_default_trustee
        creator_did, creator_vk = await did.create_and_store_my_did(
            wallet_handler, '{}')
        res = await send_nym(pool_handler, wallet_handler, trustee_did,
                             creator_did, creator_vk, None, 'TRUSTEE')
        assert res['op'] == 'REPLY'
        schema_id, res = await send_schema(pool_handler,
                                           wallet_handler, creator_did,
                                           random_string(10), '1.0',
                                           json.dumps(['attribute']))
        assert res['op'] == 'REPLY'
        await asyncio.sleep(1)
        res = await get_schema(pool_handler, wallet_handler, creator_did,
                               schema_id)
        schema_id, schema_json = await ledger.parse_get_schema_response(
            json.dumps(res))
        req = {
            'protocolVersion': 2,
            'reqId': reqid,
            'identifier': creator_did,
            'operation': {
                'type': '102',
                'ref': json.loads(schema_json)['seqNo'],
                'signature_type': 'CL',
                'tag': tag,
                'data': {
                    'primary': primary
                }
            }
        }
        res = json.loads(await ledger.sign_and_submit_request(
            pool_handler, wallet_handler, creator_did, json.dumps(req)))
        print(res)
        assert res['op'] == 'REPLY'

    @settings(deadline=None, max_examples=10000, verbosity=Verbosity.verbose)
    @given(values=strategy_for_op_and_data_cases())
    @pytest.mark.asyncio
    async def test_case_random_req_op(self, pool_handler, wallet_handler,
                                      get_default_trustee, values):
        trustee_did, trustee_vk = get_default_trustee
        req = {
            'protocolVersion': 2,
            'reqId': values[0],
            'identifier': trustee_did,
            'operation': values[2]
        }
        # client-side validation
        with pytest.raises(IndyError):
            await ledger.sign_and_submit_request(pool_handler, wallet_handler,
                                                 trustee_did, json.dumps(req))

    @settings(deadline=None, max_examples=10000, verbosity=Verbosity.verbose)
    @given(values=strategy_for_op_and_data_cases())
    @pytest.mark.asyncio
    async def test_case_random_req_data(self, pool_handler, wallet_handler,
                                        get_default_trustee, values):
        trustee_did, trustee_vk = get_default_trustee
        req = {
            'protocolVersion': 2,
            'reqId': values[0],
            'identifier': trustee_did,
            'operation': {
                'type': str(values[1]),
                'data': values[2]
            }
        }
        res = json.loads(await ledger.sign_and_submit_request(
            pool_handler, wallet_handler, trustee_did, json.dumps(req)))
        # server-side static validation
        try:
            assert res['op'] == 'REQNACK'
        except KeyError:
            res = {k: json.loads(v) for k, v in res.items()}
            assert all([v['op'] == 'REQNACK' for k, v in res.items()])

    @settings(deadline=None, max_examples=10000, verbosity=Verbosity.verbose)
    @given(amount=strategies.integers(min_value=0, max_value=max_size),
           seqno=strategies.integers(min_value=0, max_value=max_size),
           signatures=strategies.text(ascii_letters,
                                      min_size=0,
                                      max_size=max_size),
           reqid=strategies.integers(min_value=1, max_value=max_size))
    @pytest.mark.asyncio
    async def test_case_invalid_payment(self, payment_init, pool_handler,
                                        wallet_handler, get_default_trustee,
                                        amount, seqno, signatures, reqid):
        libsovtoken_payment_method = 'sov'
        trustee_did, _ = get_default_trustee
        try:
            address1 = await payment.create_payment_address(
                wallet_handler, libsovtoken_payment_method,
                json.dumps({'seed': '0000000000000000000000000Wallet1'}))
            address2 = await payment.create_payment_address(
                wallet_handler, libsovtoken_payment_method,
                json.dumps({'seed': '0000000000000000000000000Wallet2'}))
        except IndyError:
            address1 = 'pay:sov:aRczGoccsHV7mNJgpBVYwCveytvyL8JBa1X28GFSwD44m76eE'
            address2 = 'pay:sov:H8v7bJwwKEnEUjd5dGec3oTbLMwgFLUVHL7kDKtVqBtLaQ2JG'
        req = {
            'operation': {
                'type': '10001',
                'outputs': [{
                    'address': address2.split(':')[-1],
                    'amount': amount
                }],
                'inputs': [{
                    'address': address1.split(':')[-1],
                    'seqNo': seqno
                }],
                'signatures': [signatures]
            },
            'reqId': reqid,
            'protocolVersion': 2,
            'identifier': trustee_did
        }
        print(req)
        res = json.loads(await ledger.sign_and_submit_request(
            pool_handler, wallet_handler, trustee_did, json.dumps(req)))
        print(res)
        assert res['op'] == 'REQNACK'
Example #37
class StatusMagicFolder(AsyncTestCase):
    """
    Tests for ``magic-folder status``.
    """
    @defer.inlineCallbacks
    def test_command_exists(self):
        """
        There is a status command at all.
        """
        outcome = yield cli(
            FilePath(self.mktemp()),
            [b"status", b"--help"],
        )
        addOutcomeDetails(self, outcome)
        self.assertThat(
            outcome.succeeded(),
            Equals(True),
        )

    @defer.inlineCallbacks
    def test_command_error(self):
        """
        If the status command encounters an error it reports it on stderr and
        exits with a non-zero code.
        """
        outcome = yield cli(
            # Pass in a fanciful node directory to provoke a predictable
            # error.
            FilePath(self.mktemp()),
            [b"status"],
        )
        self.expectThat(
            outcome.succeeded(),
            Equals(False),
        )
        self.expectThat(
            outcome.stderr,
            Contains(b"does not exist"),
        )

    @defer.inlineCallbacks
    def test_command_success(self):
        """
        If the status command succeeds it reports some information on stdout.
        """
        client_fixture = SelfConnectedClient(reactor)
        yield client_fixture.use_on(self)

        # Create a magic folder so that we can inspect its status.
        magic_folder = client_fixture.tempdir.child(u"magic-folder")
        outcome = yield cli(
            client_fixture.node_directory,
            [
                b"create",
                b"magic-folder-alias:",
                b"member-alias",
                magic_folder.asBytesMode().path,
            ],
        )
        self.assertThat(
            outcome.succeeded(),
            Equals(True),
        )

        assigner = SameProcessStreamEndpointAssigner()
        assigner.setUp()
        self.addCleanup(assigner.tearDown)
        ignored, endpoint_description = assigner.assign(reactor)

        # Start the magic folder service after creating the magic folder so it
        # will be noticed.
        magic_folder_service = magic_folder_cli.MagicFolderService.from_node_directory(
            reactor,
            client_fixture.node_directory.path,
            endpoint_description,
        )
        magic_folder_service.startService()
        self.addCleanup(magic_folder_service.stopService)

        outcome = yield cli(
            client_fixture.node_directory,
            [b"status"],
        )

        addOutcomeDetails(self, outcome)

        self.assertThat(
            outcome.succeeded(),
            Equals(True),
        )

    @given(
        folder_names(),
        datetimes(),
        dictionaries(
            path_segments(),
            tuples(just(u"filenode"), filenodes()),
        ),
        # Laziness
        path_segments(),
        lists(queued_items()),
        lists(queued_items()),
    )
    def test_formatting(
        self,
        folder_name,
        now,
        local_files,
        remote_name,
        upload_items,
        download_items,
    ):
        self.assertThat(
            magic_folder_cli._format_status(
                now,
                Status(
                    folder_name,
                    local_files=local_files,
                    remote_files={remote_name: local_files},
                    folder_status=list(
                        status_for_item(kind, item) for (kind, items) in [
                            ("upload", upload_items),
                            ("download", download_items),
                        ] for item in items),
                ),
            ),
            IsInstance(unicode),
        )
Example #38
File: _strategies.py Project: jab/bidict
NON_NAMED_BIDICT_TYPES = one_of(t.NON_NAMED_BIDICT_TYPES)
ORDERED_MAPPING_TYPES = one_of(t.ORDERED_MAPPING_TYPES)
HASHABLE_MAPPING_TYPES = one_of(t.HASHABLE_MAPPING_TYPES)
ON_DUP_ACTIONS = one_of((DROP_NEW, DROP_OLD, RAISE))
ON_DUP = st.tuples(ON_DUP_ACTIONS, ON_DUP_ACTIONS, ON_DUP_ACTIONS).map(OnDup._make)

BOOLEANS = st.booleans()
# Combine a few different strategies together that generate atomic values
# that can be used to initialize test bidicts with. Including only None, bools, and ints
# provides enough coverage; including more just slows down example generation.
ATOMS = st.none() | BOOLEANS | st.integers()
PAIRS = st.tuples(ATOMS, ATOMS)
NON_MAPPINGS = ATOMS | st.iterables(ATOMS)
ALPHABET = tuple(chr(i) for i in range(0x10ffff) if chr(i).isidentifier())
VALID_NAMES = st.text(ALPHABET, min_size=1, max_size=16)
DICTS_KW_PAIRS = st.dictionaries(VALID_NAMES, ATOMS, max_size=MAX)
L_PAIRS = st.lists(PAIRS, max_size=MAX)
I_PAIRS = st.iterables(PAIRS, max_size=MAX)
FST_SND = (itemgetter(0), itemgetter(1))
L_PAIRS_NODUP = st.lists(PAIRS, unique_by=FST_SND, max_size=MAX)
I_PAIRS_NODUP = st.iterables(PAIRS, unique_by=FST_SND, max_size=MAX)
# Reserve a disjoint set of atoms as a source of values guaranteed not to have been
# inserted into a test bidict already.
DIFF_ATOMS = st.characters()
DIFF_PAIRS = st.tuples(DIFF_ATOMS, DIFF_ATOMS)
L_DIFF_PAIRS_NODUP = st.lists(DIFF_PAIRS, unique_by=FST_SND, min_size=1, max_size=MAX)
DIFF_ITEMS = st.tuples(L_PAIRS_NODUP, L_DIFF_PAIRS_NODUP)
RANDOMS = st.randoms(use_true_random=False)
SAME_ITEMS_DIFF_ORDER = st.tuples(
    st.lists(PAIRS, unique_by=FST_SND, min_size=2, max_size=MAX), RANDOMS
).map(
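A minimal, self-contained sketch of the pattern above, assuming only hypothesis itself: compose atom strategies with |, then enforce per-position uniqueness the way the NODUP strategies do.

# Editor's sketch, not part of the bidict suite.
from operator import itemgetter

from hypothesis import given, strategies as st

atoms = st.none() | st.booleans() | st.integers()
pairs = st.tuples(atoms, atoms)
# unique_by accepts a tuple of key functions: both slots stay duplicate-free
pairs_nodup = st.lists(pairs, unique_by=(itemgetter(0), itemgetter(1)), max_size=8)

@given(pairs_nodup)
def test_positions_are_unique(items):
    assert len({k for k, _ in items}) == len(items)
    assert len({v for _, v in items}) == len(items)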
Example #39
from hypothesis import given
from hypothesis import strategies as st

from giap.consumer import Method, Operation
from giap.core import GIAP


@given(
    st.one_of(st.integers(min_value=1), st.text()),
    st.text(),
    st.dictionaries(st.text(), st.text()),
    st.one_of(st.none(),
              st.ip_addresses().map(str)),
    st.text(),
)
def test_track(mock_consumer, id_, name, properties, ip_address, token):
    giap = GIAP(token, "")

    giap.track(id_, name, properties, ip_address)

    args = mock_consumer.send.call_args_list[-1][0]
    assert args[0] == "/events"
    assert args[2] == token

    data = args[1]["events"][0]
    assert data["$distinct_id"] == str(id_)
    assert data["$name"] == name
    assert isinstance(data["$time"], int)
    assert "$lib" in data
    assert "$lib_version" in data
Example #40
    assert headers_to_scrapy([{'name': 'Content-Type', 'value': 'text/html'}]) == html_headers


_primitive = (
    st.floats(allow_infinity=False, allow_nan=False) |
    st.booleans() |
    st.text() |
    st.none() |
    st.integers()
)
_data = st.recursive(_primitive,
    lambda children: (
        children |
        st.lists(children) |
        st.tuples(children) |
        st.dictionaries(st.text(), children) |
        st.tuples(st.just('h'), children)
    ),
    max_leaves=5,
)
_data_notuples = st.recursive(_primitive,
    lambda children: (
        children |
        st.lists(children) |
        st.dictionaries(st.text(), children)
    ),
    max_leaves=5,
)


@given(_data, _data)
Example #41
@st.composite
def dict_and_key(draw, keys=st.integers(), values=st.uuids()):
    d = draw(st.dictionaries(keys, values, min_size=1))
    key = draw(st.sampled_from(list(d.keys())))
    return (d, key)
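A hedged usage sketch for the composite above: because the key is sampled from the dict itself, lookup and deletion paths are easy to exercise.

from hypothesis import given

@given(dict_and_key())
def test_sampled_key_is_present(d_and_k):
    d, key = d_and_k
    assert key in d  # the key was drawn from the dict's own keys
    del d[key]
    assert key not in d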
Example #42
def homogeneous_dictionary(**kwargs):
    """Return a strategy which generates a dictionary of uniform key:value type."""
    return index_types.flatmap(lambda s: hs.dictionaries(s(), s(), **kwargs))
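A usage sketch, assuming index_types samples zero-argument strategy factories such as hs.integers: every generated dict is type-uniform.

from hypothesis import given

@given(homogeneous_dictionary(min_size=1))
def test_dict_is_type_uniform(d):
    assert len({type(k) for k in d}) == 1
    assert len({type(v) for v in d.values()}) == 1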
Example #43
@hs.composite
def dict_node(draw, key=const_node(), value=const_node(), **kwargs):
    items = draw(hs.dictionaries(key, value, **kwargs)).items()
    node = nodes.Dict()
    node.postinit(items)
    return node
Example #44
def test_map_empty():
    m: Map[str, int] = map.empty
    assert map.is_empty(m)
    assert len(m) == 0
    assert not m


def test_map_non_empty():
    m: Map[str, int] = map.empty.add("test", 42)
    assert not map.is_empty(m)
    assert len(m) == 1
    assert m


@given(st.dictionaries(keys=st.text(), values=st.integers()))
def test_map_create(xs: Dict[str, int]):
    items: Iterable[Tuple[str, int]] = xs.items()
    m = map.create(items)
    assert len(m) == len(xs)


@given(st.dictionaries(keys=st.text(), values=st.integers()))
def test_map_of_seq(xs: Dict[str, int]):
    items: ItemsView[str, int] = xs.items()
    m = map.of_seq(items)
    assert len(m) == len(xs)


@given(st.dictionaries(keys=st.text(), values=st.integers()))
def test_map_to_list_fluent(xs: Dict[str, int]):
Example #45
def random_dictionary(**kwargs):
    """Return a strategy which generates a random list."""
    return hs.dictionaries(primitive_values, primitive_values, **kwargs)
Example #46
def test_produces_dictionaries_of_at_least_minimum_size():
    t = find(ds.dictionaries(ds.booleans(), ds.integers(), min_size=2),
             lambda x: True)
    assert t == {False: 0, True: 0}
Example #47
class TestAutoMonitoring:
    @staticmethod
    def assert_feature_data_correctness(feature_data, in_df, out_df):
        """Verifies that profiler type and reference are correct for column."""
        col_type = feature_data.labels["col_type"]
        source_df = in_df if col_type == "input" else out_df
        assert feature_data.feature_name in source_df

        # reconstruct reference distribution
        reference_content = json.loads(feature_data.content)
        reference = _verta_data_type._VertaDataType._from_dict(
            reference_content)

        # reconstruct profiler
        profiler_name = feature_data.profiler_name
        profiler_args = json.loads(feature_data.profiler_parameters)
        feature_profiler = getattr(profiler, profiler_name)(**profiler_args)

        # verify re-profiling column yields reference distribution
        _, profile = feature_profiler.profile_column(
            source_df,
            feature_data.feature_name,
        )
        assert profile == reference

    def test_non_df(self, model_version):
        pd = pytest.importorskip("pandas")

        with pytest.raises(TypeError):
            model_version.log_training_data_profile(
                "abc",
                pd.DataFrame([1, 2, 3]),
            )
        with pytest.raises(TypeError):
            model_version.log_training_data_profile(
                pd.DataFrame([1, 2, 3]),
                2,
            )

        # coerce out_df if Series
        model_version.log_training_data_profile(
            pd.DataFrame([1, 2, 3], columns=["in"]),
            pd.Series([1, 2, 3], name="out"),
        )

    @hypothesis.settings(deadline=None)  # building DataFrames can be slow
    @hypothesis.given(
        df=strategies.simple_dataframes(),  # pylint: disable=no-value-for-parameter
        labels=st.dictionaries(st.text(), st.text()),
    )
    def test_create_summaries(self, df, labels):
        """Unit test for the exact expected output of discrete & continuous columns."""
        pytest.importorskip("numpy")

        # missing
        for col in ["continuous", "discrete"]:
            feature_data = RegisteredModelVersion._create_missing_value_summary(
                df,
                col,
                labels,
            )
            _sample = profiler.MissingValuesProfiler([col]).profile(df)
            _histogram = list(_sample.values())[0]
            assert feature_data.feature_name == col
            assert feature_data.profiler_name == "MissingValuesProfiler"
            assert json.loads(feature_data.profiler_parameters) == {
                "columns": [col]
            }
            assert feature_data.summary_type_name == "verta.discreteHistogram.v1"
            assert feature_data.labels == labels
            assert json.loads(feature_data.content) == _histogram._as_dict()

        # continuous distribution
        feature_data = RegisteredModelVersion._create_continuous_histogram_summary(
            df,
            "continuous",
            labels,
        )
        _sample = profiler.ContinuousHistogramProfiler(["continuous"]).profile(df)
        _histogram = list(_sample.values())[0]
        assert feature_data.feature_name == "continuous"
        assert feature_data.profiler_name == "ContinuousHistogramProfiler"
        assert json.loads(feature_data.profiler_parameters) == {
            "columns": ["continuous"],
            "bins": _histogram._bucket_limits,
        }
        assert feature_data.summary_type_name == "verta.floatHistogram.v1"
        assert feature_data.labels == labels
        assert json.loads(feature_data.content) == _histogram._as_dict()

        # discrete distribution
        feature_data = RegisteredModelVersion._create_discrete_histogram_summary(
            df,
            "discrete",
            labels,
        )
        _sample = profiler.BinaryHistogramProfiler(["discrete"]).profile(df)
        _histogram = list(_sample.values())[0]
        assert feature_data.feature_name == "discrete"
        assert feature_data.profiler_name == "BinaryHistogramProfiler"
        assert json.loads(feature_data.profiler_parameters) == {
            "columns": ["discrete"]
        }
        assert feature_data.summary_type_name == "verta.discreteHistogram.v1"
        assert feature_data.labels == labels
        assert json.loads(feature_data.content) == _histogram._as_dict()

    @hypothesis.settings(deadline=None)  # building DataFrames can be slow
    @hypothesis.given(
        df=strategies.dataframes(  # pylint: disable=no-value-for-parameter
            min_rows=1,
            min_cols=2,
        ),
    )
    def test_compute_training_data_profile(self, df):
        """Unit test for helper functions handling DFs of various sizes."""
        in_df, out_df = df.iloc[:, :-1], df.iloc[:, [-1]]

        feature_data_list = RegisteredModelVersion._compute_training_data_profile(
            in_df,
            out_df,
        )
        for feature_data in feature_data_list:
            self.assert_feature_data_correctness(feature_data, in_df, out_df)

    @hypothesis.settings(deadline=None)  # building DataFrames can be slow
    @hypothesis.given(
        df=strategies.dataframes(  # pylint: disable=no-value-for-parameter
            min_rows=1,
            min_cols=2,
        ),
    )
    def test_collect_feature_data_and_vis_attributes(self, df):
        """Unit test that attributes pre-logging are the correct format."""
        in_df, out_df = df.iloc[:, :-1], df.iloc[:, [-1]]

        feature_data_list = RegisteredModelVersion._compute_training_data_profile(
            in_df,
            out_df,
        )
        feature_data_attrs = RegisteredModelVersion._collect_feature_data_and_vis_attributes(
            feature_data_list,
        )

        for key, val in feature_data_attrs.items():
            if key.startswith(_deployable_entity._FEATURE_DATA_ATTR_PREFIX):
                feature_data = _utils.json_to_proto(val,
                                                    FeatureDataInModelVersion)
                self.assert_feature_data_correctness(feature_data, in_df,
                                                     out_df)

                if feature_data.profiler_name == "MissingValuesProfiler":
                    sample_key = feature_data.feature_name + "MissingValues"
                else:
                    sample_key = feature_data.feature_name + "Distribution"
                sample_key = (
                    _deployable_entity._TRAINING_DATA_ATTR_PREFIX +
                    RegisteredModelVersion._normalize_attribute_key(sample_key)
                )
                assert feature_data_attrs[sample_key] == json.loads(
                    feature_data.content)

    def test_profile_training_data(self, model_version):
        """Integration test for logging attributes with correct structure."""
        pd = pytest.importorskip("pandas")
        np = pytest.importorskip("numpy")

        cont_col = np.random.random(100)
        discrete_col = np.random.choice(5, 100)
        string_discrete_col = np.random.choice(["a", "b", "c", "d", "e"],
                                               size=100)
        string_freeform_col = [
            uuid.uuid4().hex.upper()[0:10] for _ in range(100)
        ]
        other_col = [datetime.datetime.now() for x in range(100)]
        output_col = np.random.choice(2, 100)

        col_names = [
            "Continuous_Numeric",
            "Discrete_Numeric",
            "Discrete_String",
            "Freeform_String",
            "Other",
            "Output_Col",
        ]
        supported_col_names = [
            "Continuous_Numeric", "Discrete_Numeric", "Output_Col"
        ]

        # create dataframes
        df = pd.DataFrame(
            list(
                zip(
                    cont_col,
                    discrete_col,
                    string_discrete_col,
                    string_freeform_col,
                    other_col,
                    output_col,
                )),
            columns=col_names,
        )

        # log to model version with new method
        model_version.log_training_data_profile(
            df.loc[:, df.columns != "Output_Col"],
            pd.DataFrame(df["Output_Col"]),
        )

        # get back attributes to validate
        attributes = model_version.get_attributes()
        key = _deployable_entity._FEATURE_DATA_ATTR_PREFIX + "{}"
        discrete_col_missing_summary = _utils.json_to_proto(
            model_version.get_attribute(key.format("2")),
            FeatureDataInModelVersion,  # missing value
        )
        discrete_col_distribution_summary = _utils.json_to_proto(
            model_version.get_attribute(key.format("3")),
            FeatureDataInModelVersion,  # distribution
        )

        # missing value, distribution summary for each supported column +
        # equal number of attributes for visualization
        assert len(attributes.keys()) == len(supported_col_names) * 2 * 2
        assert (discrete_col_distribution_summary.summary_type_name ==
                "verta.discreteHistogram.v1")
        assert (discrete_col_distribution_summary.profiler_name ==
                "BinaryHistogramProfiler")
        assert (len(
            json.loads(discrete_col_distribution_summary.content)
            ["discreteHistogram"]["buckets"]) <= 5)

        assert (discrete_col_missing_summary.summary_type_name ==
                "verta.discreteHistogram.v1")
        assert discrete_col_missing_summary.profiler_name == "MissingValuesProfiler"
        assert (len(
            json.loads(discrete_col_missing_summary.content)
            ["discreteHistogram"]["buckets"]) == 2)

        # reference distribution attributes can be fetched back as histograms
        for col in supported_col_names:
            key = _deployable_entity._TRAINING_DATA_ATTR_PREFIX + col + "Distribution"
            histogram = model_version.get_attribute(key)
            assert isinstance(histogram, _verta_data_type._VertaDataType)

    def test_reconstruct_profilers(self, model_version):
        """Profiler and ref distribution can be reconstructed from attr."""
        np = pytest.importorskip("numpy")
        pd = pytest.importorskip("pandas")

        in_col = "continuous"
        out_col = "discrete"
        num_rows = 24
        df = pd.DataFrame(
            {
                in_col: np.random.random(size=num_rows) * 10,
                out_col: range(num_rows),
            }, )
        model_version.log_training_data_profile(
            in_df=df[[in_col]],
            out_df=df[[out_col]],
        )

        for key, val in model_version.get_attributes().items():
            if key.startswith(_deployable_entity._FEATURE_DATA_ATTR_PREFIX):
                feature_data = val

                reference_content = json.loads(feature_data["content"])
                reference = _verta_data_type._VertaDataType._from_dict(
                    reference_content)

                profiler_name = feature_data["profiler_name"]
                profiler_args = json.loads(feature_data["profiler_parameters"])
                feature_profiler = getattr(profiler,
                                           profiler_name)(**profiler_args)

                if isinstance(feature_profiler,
                              profiler.MissingValuesProfiler):
                    point = None
                elif isinstance(feature_profiler,
                                profiler.BinaryHistogramProfiler):
                    point = np.random.randint(num_rows)
                elif isinstance(feature_profiler,
                                profiler.ContinuousHistogramProfiler):
                    point = np.random.random() * 10
                else:
                    raise TypeError(
                        "this test doesn't support profiler type {}".format(
                            type(feature_profiler)))
                point_profile = feature_profiler.profile_point(
                    point, reference)
                assert point_profile._type_string() == feature_data["summary_type_name"]

Example #48
def test_minimize_multi_key_dicts():
    assert find(
        dictionaries(keys=booleans(), values=booleans()),
        bool
    ) == {False: False}
Example #49
def test_decimals():
    assert minimal(ds.decimals(), lambda f: f.is_finite() and f >= 1) == 1


def test_non_float_decimal():
    minimal(ds.decimals(), lambda d: d.is_finite() and decimal.Decimal(float(d)) != d)


def test_produces_dictionaries_of_at_least_minimum_size():
    t = minimal(
        ds.dictionaries(ds.booleans(), ds.integers(), min_size=2), lambda x: True
    )
    assert t == {False: 0, True: 0}
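
The minimal helper used above comes from hypothesis's own test suite; a rough stand-in, written in terms of the find function the neighbouring examples use, is:

def minimal(strategy, predicate=lambda x: True):
    # approximation: the fully-shrunk example satisfying the predicate
    return find(strategy, predicate)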


@given(ds.dictionaries(ds.integers(), ds.integers(), max_size=5))
@settings(max_examples=50)
def test_dictionaries_respect_size(d):
    assert len(d) <= 5


@given(ds.dictionaries(ds.integers(), ds.integers(), max_size=0))
@settings(max_examples=50)
def test_dictionaries_respect_zero_size(d):
    assert len(d) == 0


@given(ds.lists(ds.none(), max_size=5))
def test_none_lists_respect_max_size(ls):
    assert len(ls) <= 5
Example #50
def extend_fn(children):
    lists_st = st.lists(children)
    dicts_st = st.dictionaries(encodable_text, children)
    return lists_st | dicts_st
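
extend_fn has exactly the shape st.recursive expects for its extension argument; a sketch with plain st.text() standing in for the project-specific encodable_text:

from hypothesis import strategies as st

encodable_text = st.text()  # stand-in for the project-specific strategy

json_like = st.recursive(
    st.none() | st.booleans() | st.integers() | encodable_text,
    extend_fn,  # wraps children in lists and dicts, as defined above
    max_leaves=10,
)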
Example #51
File: test_dvol.py, Project: lalyos/dvol
        indicating that it is the currently selected volume.
        """
        dvol = VoluminousOptions()
        dvol.parseOptions(ARGS + ["-p", self.tmpdir.path, "init", "foo"])
        dvol.parseOptions(ARGS + ["-p", self.tmpdir.path, "init", "bar"])
        dvol.parseOptions(ARGS + ["-p", self.tmpdir.path, "rm", "-f", "bar"])
        dvol.parseOptions(ARGS + ["-p", self.tmpdir.path, "list"])
        header, rest = self._parse_list_output(dvol)
        expected_volumes = [["foo", "master"]]
        self.assertEqual(
            sorted(expected_volumes),
            sorted(rest),
        )

    @skip_if_go_version
    @given(volumes=dictionaries(
        volume_names(), branch_names(), min_size=1).map(items))
    def test_branch_multi_volumes(self, volumes):
        """
        Always show the last checked-out branch for all volumes in ``list``.
        """
        tmpdir = FilePath(self.mktemp())
        tmpdir.makedirs()

        dvol = VoluminousOptions()
        for volume, branch in volumes:
            dvol.parseOptions(ARGS + ["-p", tmpdir.path, "init", volume])
            dvol.parseOptions(ARGS + ["-p", tmpdir.path, "commit", "-m", "hello"])
            dvol.parseOptions(ARGS + ["-p", tmpdir.path, "checkout", "-b", branch])

        dvol.parseOptions(ARGS + ["-p", tmpdir.path, "list"])
        lines = dvol.voluminous.getOutput()[-1].split("\n")
Example #52
    get_integer_bounds,
    get_number_bounds,
    get_type,
    is_valid,
    merged,
    resolve_all_refs,
)

JSON_STRATEGY: st.SearchStrategy[JSONType] = st.recursive(
    st.none()
    | st.booleans()
    | st.integers()
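    # -0.0 is falsy, so the map below normalizes it to +0.0, keeping drawn JSON canonical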
    | st.floats(allow_nan=False, allow_infinity=False).map(lambda x: x or 0.0)
    | st.text(),
    lambda strategy: st.lists(strategy, max_size=3)
    | st.dictionaries(st.text(), strategy, max_size=3),
)


def merged_as_strategies(schemas: List[Schema]) -> st.SearchStrategy[JSONType]:
    assert schemas, "internal error: must pass at least one schema to merge"
    if len(schemas) == 1:
        return from_schema(schemas[0])
    # Try to merge combinations of strategies.
    strats = []
    combined: Set[str] = set()
    inputs = {encode_canonical_json(s): s for s in schemas}
    for group in itertools.chain.from_iterable(
            itertools.combinations(inputs, n)
            for n in range(len(inputs), 0, -1)):
        if combined.issuperset(group):
Example #53
    for data in data_type:
        with pytest.raises(TypeError):
            s.state = data

    if isinstance(t, str):
        with pytest.raises(ValueError):
            s.state = t

    for val in states._stage_state_values.keys():
        s.state = val


# ------------------------------------------------------------------------------
#
@given(l=st.lists(st.text()),
       d=st.dictionaries(st.text(), st.text()))
def test_stage_post_exec_assignment(l, d):

    s = Stage()

    def func():
        return True

    with pytest.raises(TypeError):
        s.post_exec = l

    with pytest.raises(TypeError):
        s.post_exec = d


    s.post_exec = func
Example #54
class TestIntegration(unittest.TestCase):
    @given(filepath())
    def test_release(self, project_root):
        argv = [
            'release', '--platform-config', '../path/to/config',
            '--release-data ami_id=ami-z9876', '42'
        ]
        with patch('cdflow.docker') as docker, \
                patch('cdflow.os') as os, \
                patch('cdflow.abspath') as abspath, \
                patch('cdflow.open') as open_:
            abs_path_to_config = '/root/path/to/config'
            abspath.return_value = abs_path_to_config

            image = MagicMock(spec=Image)
            docker.from_env.return_value.images.pull.return_value = image
            image.attrs = {'RepoDigests': ['hash']}

            docker.from_env.return_value.containers.run.return_value.attrs = {
                'State': {
                    'ExitCode': 0,
                }
            }

            os.getcwd.return_value = project_root
            os.getenv.return_value = False

            config_file = MagicMock(spec=TextIOWrapper)
            config_file.read.return_value = yaml.dump({'team': 'a-team'})
            open_.return_value.__enter__.return_value = config_file

            exit_status = main(argv)

        assert exit_status == 0

        docker.from_env.assert_called_once()
        docker.from_env.return_value.images.pull.assert_called_once_with(
            'mergermarket/cdflow-commands:latest')
        docker.from_env.return_value.containers.run.assert_called_once_with(
            'mergermarket/cdflow-commands:latest',
            command=argv,
            environment={
                'AWS_ACCESS_KEY_ID': ANY,
                'AWS_SECRET_ACCESS_KEY': ANY,
                'AWS_SESSION_TOKEN': ANY,
                'FASTLY_API_KEY': ANY,
                'GITHUB_TOKEN': ANY,
                'CDFLOW_IMAGE_DIGEST': 'hash',
                'LOGENTRIES_ACCOUNT_KEY': ANY,
                'DATADOG_APP_KEY': ANY,
                'DATADOG_API_KEY': ANY,
            },
            detach=True,
            volumes={
                project_root: {
                    'bind': project_root,
                    'mode': 'rw',
                },
                abs_path_to_config: {
                    'bind': abs_path_to_config,
                    'mode': 'ro',
                },
                '/var/run/docker.sock': {
                    'bind': '/var/run/docker.sock',
                    'mode': 'ro',
                },
            },
            working_dir=project_root)

        docker.from_env.return_value.containers.run.return_value.logs.\
            assert_called_once_with(
                stream=True,
                follow=True,
                stdout=True,
                stderr=True,
            )

    @given(
        fixed_dictionaries({
            'project_root':
            filepath(),
            'environment':
            dictionaries(
                keys=text(alphabet=printable, min_size=1, max_size=3),
                values=text(alphabet=printable, min_size=1, max_size=3),
                max_size=3,
            ),
            'image_id':
            image_id(),
        }))
    def test_release_with_pinned_command_image(self, fixtures):
        argv = ['release', '42', '--platform-config', 'path/to/config']
        project_root = fixtures['project_root']
        environment = fixtures['environment']
        pinned_image_id = fixtures['image_id']
        environment['CDFLOW_IMAGE_ID'] = pinned_image_id

        with patch('cdflow.docker') as docker, \
                patch('cdflow.os') as os, \
                patch('cdflow.abspath') as abspath, \
                patch('cdflow.open') as open_:
            abs_path_to_config = '/root/path/to/config'
            abspath.return_value = abs_path_to_config

            image = MagicMock(spec=Image)
            docker.from_env.return_value.images.pull.return_value = image
            image.attrs = {'RepoDigests': ['hash']}

            docker.from_env.return_value.containers.run.return_value.attrs = {
                'State': {
                    'ExitCode': 0,
                }
            }

            os.getcwd.return_value = project_root
            os.getenv.return_value = False

            os.environ = environment

            config_file = MagicMock(spec=TextIOWrapper)
            config_file.read.return_value = yaml.dump({'team': 'a-team'})
            open_.return_value.__enter__.return_value = config_file

            exit_status = main(argv)

        assert exit_status == 0

        docker.from_env.return_value.images.pull.assert_called_once_with(
            pinned_image_id)
        docker.from_env.return_value.containers.run.assert_called_once_with(
            pinned_image_id,
            command=argv,
            environment={
                'AWS_ACCESS_KEY_ID': ANY,
                'AWS_SECRET_ACCESS_KEY': ANY,
                'AWS_SESSION_TOKEN': ANY,
                'FASTLY_API_KEY': ANY,
                'GITHUB_TOKEN': ANY,
                'CDFLOW_IMAGE_DIGEST': 'hash',
                'LOGENTRIES_ACCOUNT_KEY': ANY,
                'DATADOG_APP_KEY': ANY,
                'DATADOG_API_KEY': ANY,
            },
            detach=True,
            volumes={
                project_root: {
                    'bind': project_root,
                    'mode': 'rw',
                },
                abs_path_to_config: {
                    'bind': abs_path_to_config,
                    'mode': 'ro',
                },
                '/var/run/docker.sock': {
                    'bind': '/var/run/docker.sock',
                    'mode': 'ro',
                },
            },
            working_dir=project_root)

    @given(
        fixed_dictionaries({
            'project_root':
            filepath(),
            's3_bucket_and_key':
            s3_bucket_and_key(),
            'release_bucket':
            text(
                alphabet=VALID_ALPHABET,
                min_size=3,
                max_size=5,
            ),
        }))
    def test_classic_deploy(self, fixtures):
        argv = ['deploy', 'aslive', '42']

        with patch('cdflow.Session') as Session, \
                patch('cdflow.BytesIO') as BytesIO, \
                patch('cdflow.docker') as docker, \
                patch('cdflow.os') as os, \
                patch('cdflow.open') as open_:

            s3_resource = Mock()

            image_digest = 'sha:12345asdfg'
            s3_resource.Object.return_value.metadata = {
                'cdflow_image_digest': image_digest
            }

            Session.return_value.resource.return_value = s3_resource

            BytesIO.return_value.__enter__.return_value.read.return_value = '''
                {{
                    "release-bucket": "{}",
                    "classic-metadata-handling": true
                }}
            '''.format(fixtures['release_bucket'])

            config_file = MagicMock(spec=TextIOWrapper)
            config_file.read.return_value = yaml.dump({
                'account-scheme-url':
                's3://{}/{}'.format(*fixtures['s3_bucket_and_key']),
                'team':
                'a-team',
            })
            open_.return_value.__enter__.return_value = config_file

            docker_client = MagicMock(spec=DockerClient)
            docker.from_env.return_value = docker_client
            docker.from_env.return_value.containers.run.return_value.attrs = {
                'State': {
                    'ExitCode': 0,
                }
            }

            project_root = fixtures['project_root']
            os.getcwd.return_value = project_root

            exit_status = main(argv)

            assert exit_status == 0

            s3_resource.Object.assert_any_call(
                fixtures['s3_bucket_and_key'][0],
                fixtures['s3_bucket_and_key'][1],
            )

            docker_client.containers.run.assert_called_once_with(
                image_digest,
                command=argv,
                environment=ANY,
                detach=True,
                volumes={
                    project_root: ANY,
                    '/var/run/docker.sock': ANY
                },
                working_dir=project_root,
            )

            docker.from_env.return_value.containers.run.return_value.logs.\
                assert_called_once_with(
                    stream=True,
                    follow=True,
                    stdout=True,
                    stderr=True,
                )

    @given(
        fixed_dictionaries({
            'project_root':
            filepath(),
            's3_bucket_and_key':
            s3_bucket_and_key(),
            'release_bucket':
            text(
                alphabet=VALID_ALPHABET,
                min_size=3,
                max_size=5,
            ),
            'team_name':
            text(
                alphabet=VALID_ALPHABET,
                min_size=3,
                max_size=5,
            ),
            'component_name':
            text(
                alphabet=VALID_ALPHABET,
                min_size=3,
                max_size=5,
            ),
        }))
    def test_deploy(self, fixtures):
        version = '42'
        component_name = fixtures['component_name']
        argv = ['deploy', 'aslive', version, '--component', component_name]

        with patch('cdflow.Session') as Session, \
                patch('cdflow.BytesIO') as BytesIO, \
                patch('cdflow.docker') as docker, \
                patch('cdflow.os') as os, \
                patch('cdflow.open') as open_:

            s3_resource = Mock()

            image_digest = 'sha:12345asdfg'
            s3_resource.Object.return_value.metadata = {
                'cdflow_image_digest': image_digest
            }

            Session.return_value.resource.return_value = s3_resource

            BytesIO.return_value.__enter__.return_value.read.return_value = '''
                {{
                    "release-bucket": "{}"
                }}
            '''.format(fixtures['release_bucket'])

            config_file = MagicMock(spec=TextIOWrapper)
            config_file.read.return_value = yaml.dump({
                'account-scheme-url':
                's3://{}/{}'.format(*fixtures['s3_bucket_and_key']),
                'team':
                fixtures['team_name'],
            })
            open_.return_value.__enter__.return_value = config_file

            docker_client = MagicMock(spec=DockerClient)
            docker.from_env.return_value = docker_client
            docker.from_env.return_value.containers.run.return_value.attrs = {
                'State': {
                    'ExitCode': 0,
                }
            }

            project_root = fixtures['project_root']
            os.getcwd.return_value = project_root

            exit_status = main(argv)

            assert exit_status == 0

            s3_resource.Object.assert_any_call(
                fixtures['s3_bucket_and_key'][0],
                fixtures['s3_bucket_and_key'][1],
            )

            s3_resource.Object.assert_any_call(
                fixtures['release_bucket'],
                '{}/{}/{}-{}.zip'.format(
                    fixtures['team_name'],
                    component_name,
                    component_name,
                    version,
                ),
            )

            docker_client.containers.run.assert_called_once_with(
                image_digest,
                command=argv,
                environment=ANY,
                detach=True,
                volumes={
                    project_root: ANY,
                    '/var/run/docker.sock': ANY
                },
                working_dir=project_root,
            )

            docker.from_env.return_value.containers.run.return_value.logs.\
                assert_called_once_with(
                    stream=True,
                    follow=True,
                    stdout=True,
                    stderr=True,
                )

    @given(lists(elements=text(alphabet=printable, max_size=3), max_size=3))
    def test_invalid_arguments_passed_to_container_to_handle(self, argv):
        with patch('cdflow.docker') as docker, \
                patch('cdflow.os') as os, \
                patch('cdflow.open') as open_:

            account_id = '1234567890'
            config_file = MagicMock(spec=TextIOWrapper)
            config_file.read.return_value = json.dumps(
                {'platform_config': {
                    'account_id': account_id
                }})
            open_.return_value.__enter__.return_value = config_file

            error = ContainerError(container=CDFLOW_IMAGE_ID,
                                   exit_status=1,
                                   command=argv,
                                   image=CDFLOW_IMAGE_ID,
                                   stderr='help text')
            docker.from_env.return_value.containers.run.side_effect = error
            os.path.abspath.return_value = '/'
            exit_status = main(argv)

        assert exit_status == 1

        docker.from_env.return_value.containers.run.assert_called_once_with(
            CDFLOW_IMAGE_ID,
            command=argv,
            environment=ANY,
            detach=True,
            volumes=ANY,
            working_dir=ANY)
Example #55
from hypothesis.strategies import (
    booleans,
    dictionaries,
    floats,
    integers,
    lists,
    recursive,
    text,
)
from hypothesis.extra.numpy import (
    boolean_dtypes,
    floating_dtypes,
    integer_dtypes,
    unicode_string_dtypes,
)

import superintendent.prioritisation
from superintendent.distributed import SemiSupervisor

primitive_strategy = text() | integers() | floats(allow_nan=False) | booleans()

guaranteed_dtypes = (
    boolean_dtypes()
    | integer_dtypes()
    | floating_dtypes()
    | unicode_string_dtypes()
)

container_strategy = dictionaries(text(), primitive_strategy) | lists(
    primitive_strategy
)

nested_strategy = recursive(
    container_strategy,
    lambda children: lists(children) | dictionaries(text(), children),
)

Example #56
def resolve_Counter(thing):
    return st.dictionaries(
        keys=st.from_type(thing.__args__[0]),
        values=st.integers(),
    ).map(collections.Counter)
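A hedged usage sketch: resolving a typing.Counter annotation and drawing from the result.

import collections
import typing

from hypothesis import given

@given(resolve_Counter(typing.Counter[str]))
def test_resolves_to_counters(counter):
    assert isinstance(counter, collections.Counter)
    assert all(isinstance(key, str) for key in counter)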
Example #57
"""echo api testing
"""
import json
from string import printable

from hypothesis import strategies as st
from hypothesis import given
from django.test import Client

from .views import echo

json_data = st.recursive(
    st.booleans() | st.floats() | st.text(printable),
    lambda children: st.dictionaries(st.text(printable), children))


class TestEcho:
    @given(json_data=json_data)
    def test_echo_json_status_result(self, json_data, rf):
        """Testing echo api response status result is function correctly with json data
        """
        data = {'null': json.dumps(json_data)}
        request = rf.post('/echo/', data, content_type='application/json')
        response = echo(request)
        assert response.status_code == 200
        response.render()
        assert response.data == data
Example #58
import unittest
from collections import defaultdict

import hypothesis
from hypothesis import strategies as st

import horse.blen
import horse.types

words = st.integers(0, horse.types.MAX_WORD)
bytes = st.integers(0, horse.types.MAX_BYTE)
nibbles = st.integers(0, horse.types.MAX_NIBBLE)

MIN_SIGNED_INT = -(1 << horse.types.WORD_N_BITS - 1)
MAX_SIGNED_INT = (1 << horse.types.WORD_N_BITS - 1) - 1
in_range_signed_integers = st.integers(MIN_SIGNED_INT, MAX_SIGNED_INT)

binary_operations = st.sampled_from(
    tuple(horse.blen.BINARY_OPERATIONS.values()))
unary_operations = st.sampled_from(tuple(horse.blen.UNARY_OPERATIONS.values()))

memories = st.dictionaries(words, words).map(lambda d: defaultdict(int, d))
machines = st.builds(
    horse.blen.Machine,
    memory=memories,
    registers=st.fixed_dictionaries(
        {register: words
         for register in horse.blen.Register}),
)


class Tests(unittest.TestCase):
    @hypothesis.given(words)
    def test_signed_integer_encode_decode(self, word):
        there = horse.blen.word_to_signed_integer(word)
        back_again = horse.blen.signed_integer_to_word(there)
        assert word == back_again, there
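
The memories strategy above shows a useful pattern: post-process a dictionaries() draw with .map() to build the container the code under test actually consumes. A standalone sketch:

from collections import defaultdict

from hypothesis import given, strategies as st

sparse_memory = st.dictionaries(st.integers(0, 0xFFFF), st.integers(0, 0xFFFF)).map(
    lambda d: defaultdict(int, d)
)

@given(sparse_memory)
def test_unwritten_addresses_read_zero(memory):
    # defaultdict(int) backfills any address the strategy did not populate
    assert memory[0x10000] == 0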
@given(
    st.integers(min_value=1),
    st.integers(min_value=1),
    st.integers(min_value=1))
def test_fix_window_size(w, h, blocksize):
    if (w % blocksize == 1) or (h % blocksize == 1):
        assert _adjust_block_size(w, h, blocksize) == blocksize + 1
    else:
        assert _adjust_block_size(w, h, blocksize) == blocksize


# Testing _check_crs_function from utils
crs_strategy = st.lists(
                elements=st.dictionaries(
                    st.sampled_from(['crs']),
                    st.sampled_from(
                        ('EPSG:32654', 'EPSG:25832', 'EPSG:3857')
                    ),
                    min_size=1),
                min_size=2, max_size=2)


@given(crs_strategy)
def test_check_crs(crs_list):
    if crs_list[0]['crs'] != crs_list[1]['crs']:
        with pytest.raises(RuntimeError):
            _check_crs(crs_list)


# Testing _create_apply_mask function from utils
@given(arrays(np.uint16, (3, 8, 8),
              elements=st.integers(
def test_decimals():
    assert find(ds.decimals(), lambda f: f.is_finite() and f >= 1) == 1


def test_non_float_decimal():
    find(ds.decimals(),
         lambda d: d.is_finite() and decimal.Decimal(float(d)) != d)

