Example #1
def test_keys_and_default_are_not_shared():
    find_any(
        st.tuples(
            st.shared(st.integers(), key=1),
            st.shared(st.integers())),
        lambda x: x[0] != x[1]
    )
Example #2
def test_different_keys_are_not_shared():
    find_any(
        st.tuples(
            st.shared(st.integers(), key=1),
            st.shared(st.integers(), key=2)),
        lambda x: x[0] != x[1]
    )
Example #3
def interleaved_strategy_factory():
    '''
    Generate interleaved fastq records, guaranteeing that ids are the same
    for each pair.
    *_kwargs are supplied to seq_rec_strategy_factory to customize the
    forward and reverse reads.
    '''
    strategy = st.uuids().map(str).flatmap(
        lambda id:
            st.tuples(
                seq_rec_strategy_factory(5, 20, idstrat=st.shared(st.just(id), key=id)),
                seq_rec_strategy_factory(5, 20, idstrat=st.shared(st.just(id), key=id))))
    return strategy
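A minimal check of the pairing trick above, with the record factory stripped away (a hypothetical test: st.shared(st.just(id_), key=id_) keys the share by the drawn id itself, so both tuple slots resolve to the same value):

from hypothesis import given
import hypothesis.strategies as st

paired_ids = st.uuids().map(str).flatmap(
    lambda id_: st.tuples(st.shared(st.just(id_), key=id_),
                          st.shared(st.just(id_), key=id_)))

@given(paired_ids)
def test_forward_and_reverse_ids_match(pair):
    assert pair[0] == pair[1]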
Example #4
def valid_key_string(draw):
    """
    A strategy that generates valid serialized OpaqueKeys.
    """
    key_type = draw(strategies.shared(strategies.sampled_from(KEY_TYPES), key="key_type"))
    key = draw(keys_of_type(key_type))
    return text_type(key)
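Because the draw above uses st.shared(..., key="key_type"), any other strategy in the same test case can recover the identical key type (Example #22 below relies on exactly this to pair a key type with a serialized key). A minimal sketch of that coupling, with hypothetical stand-ins for KEY_TYPES:

from hypothesis import given, strategies as st

KEY_TYPES = ["course", "definition", "usage"]  # hypothetical stand-ins

shared_key_type = st.shared(st.sampled_from(KEY_TYPES), key="key_type")

@given(a=shared_key_type, b=shared_key_type)
def test_key_type_is_shared(a, b):
    assert a == b  # one value per test case, shared via the key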
Example #5
def test_simplify_shared_linked_to_size():
    xs = minimal(
        st.lists(st.shared(st.integers())),
        lambda t: sum(t) >= 1000
    )
    assert sum(xs[:-1]) < 1000
    assert (xs[0] - 1) * len(xs) < 1000
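Why the assertions hold: every element of the list is the same shared value v, so sum(xs) == v * len(xs). The two checks probe minimality in both directions: dropping the last element (v * (len(xs) - 1) < 1000) and decrementing the shared value ((v - 1) * len(xs) < 1000) must each fall back below the target.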
Example #6
def distinct_integers(draw):
    used = draw(st.shared(st.builds(set), key="distinct_integers.used"))
    i = draw(st.integers(0, 2**64 - 1).filter(lambda x: x not in used))
    used.add(i)
    return i
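A usage sketch for the pattern above (assuming distinct_integers is wrapped in st.composite, as its draw parameter suggests): the shared set persists across draws within one test case, so every drawn integer is distinct:

from hypothesis import given
import hypothesis.strategies as st

@given(st.lists(distinct_integers(), min_size=2, max_size=10))
def test_draws_never_repeat(xs):
    assert len(set(xs)) == len(xs)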
Example #7
def test_parity_depends(from_, to, must_match):
    """The failfast paths are themselves made of two parts,
    and they may need to differ or agree in parity depending
    on whether the successful test case is even or odd"""
    assert (even(from_) is even(to) if even(must_match) else
            (even(from_) is not even(to)))


##
# Run various processes in the emulator

WORD_START = dev.start_of_forth_word_space

## Setup for next1 tests

vticks_next1 = shared(
    integers(min_value=(next.cost_of_failed_test + 1) // 2, max_value=127))
cost_of_word_success = vticks_next1.flatmap(lambda ticks: integers(
    min_value=0, max_value=(ticks - (next.cost_of_failfast + 1) // 2)))
cost_of_word_failure = vticks_next1.flatmap(lambda ticks: integers(
    min_value=(ticks - (next.cost_of_failfast + 1) // 2), max_value=127))


@given(vticks=vticks_next1, word_cost=cost_of_word_success)
def test_next1_successful_test(emulator, vticks, word_cost):
    """A successful test should result in us being in the right place"""
    # Arrange
    emulator.next_instruction = "forth.next1"
    emulator.AC = 20  # Time remaining is 20 ticks (40 cycles)
    set_W(WORD_START)
    ROM[WORD_START] = [0xA0, word_cost]
    # Act
Example #8
class TestParameter(TestCase):
    def test_no_name(self):
        with self.assertRaises(TypeError):
            Parameter()

    def test_default_attributes(self):
        # Test the default attributes, providing only a name
        name = 'repetitions'
        p = GettableParam(name, vals=vals.Numbers())
        self.assertEqual(p.name, name)
        self.assertEqual(p.label, name)
        self.assertEqual(p.unit, '')
        self.assertEqual(str(p), name)

        # default validator is all numbers
        p.validate(-1000)
        with self.assertRaises(TypeError):
            p.validate('not a number')

        # docstring exists, even without providing one explicitly
        self.assertIn(name, p.__doc__)

        # test snapshot_get by looking at _get_count
        self.assertEqual(p._get_count, 0)
        snap = p.snapshot(update=True)
        self.assertEqual(p._get_count, 1)
        snap_expected = {
            'name': name,
            'label': name,
            'unit': '',
            'value': 42,
            'vals': repr(vals.Numbers())
        }
        for k, v in snap_expected.items():
            self.assertEqual(snap[k], v)

    def test_explicit_attributes(self):
        # Test the explicit attributes, providing everything we can
        name = 'volt'
        label = 'Voltage'
        unit = 'V'
        docstring = 'DOCS!'
        metadata = {'gain': 100}
        p = GettableParam(name,
                          label=label,
                          unit=unit,
                          vals=vals.Numbers(5, 10),
                          docstring=docstring,
                          snapshot_get=False,
                          metadata=metadata)

        self.assertEqual(p.name, name)
        self.assertEqual(p.label, label)
        self.assertEqual(p.unit, unit)
        self.assertEqual(str(p), name)

        with self.assertRaises(ValueError):
            p.validate(-1000)
        p.validate(6)
        with self.assertRaises(TypeError):
            p.validate('not a number')

        self.assertIn(name, p.__doc__)
        self.assertIn(docstring, p.__doc__)

        # test snapshot_get by looking at _get_count
        self.assertEqual(p._get_count, 0)
        # Snapshot should not perform get since snapshot_get is False
        snap = p.snapshot(update=True)
        self.assertEqual(p._get_count, 0)
        snap_expected = {
            'name': name,
            'label': label,
            'unit': unit,
            'vals': repr(vals.Numbers(5, 10)),
            'metadata': metadata
        }
        for k, v in snap_expected.items():
            self.assertEqual(snap[k], v)

        # attributes only available in MultiParameter
        for attr in [
                'names', 'labels', 'setpoints', 'setpoint_names',
                'setpoint_labels', 'full_names'
        ]:
            self.assertFalse(hasattr(p, attr), attr)

    def test_number_of_validations(self):

        p = Parameter('p',
                      set_cmd=None,
                      initial_value=0,
                      vals=BookkeepingValidator())

        self.assertEqual(p.vals.values_validated, [0])

        p.step = 1
        p.set(10)

        self.assertEqual(p.vals.values_validated,
                         [0, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    def test_snapshot_value(self):
        p_snapshot = Parameter('no_snapshot',
                               set_cmd=None,
                               get_cmd=None,
                               snapshot_value=True)
        p_snapshot(42)
        snap = p_snapshot.snapshot()
        self.assertIn('value', snap)
        p_no_snapshot = Parameter('no_snapshot',
                                  set_cmd=None,
                                  get_cmd=None,
                                  snapshot_value=False)
        p_no_snapshot(42)
        snap = p_no_snapshot.snapshot()
        self.assertNotIn('value', snap)

    def test_has_set_get(self):
        # Create parameter that has no set_cmd, and get_cmd returns last value
        gettable_parameter = Parameter('1', set_cmd=False, get_cmd=None)
        self.assertTrue(hasattr(gettable_parameter, 'get'))
        self.assertFalse(hasattr(gettable_parameter, 'set'))
        with self.assertRaises(NotImplementedError):
            gettable_parameter(1)
        # Initial value is None if not explicitly set
        self.assertIsNone(gettable_parameter())

        # Create parameter that saves value during set, and has no get_cmd
        settable_parameter = Parameter('2', set_cmd=None, get_cmd=False)
        self.assertFalse(hasattr(settable_parameter, 'get'))
        self.assertTrue(hasattr(settable_parameter, 'set'))
        with self.assertRaises(NotImplementedError):
            settable_parameter()
        settable_parameter(42)

        settable_gettable_parameter = Parameter('3',
                                                set_cmd=None,
                                                get_cmd=None)
        self.assertTrue(hasattr(settable_gettable_parameter, 'set'))
        self.assertTrue(hasattr(settable_gettable_parameter, 'get'))
        self.assertIsNone(settable_gettable_parameter())
        settable_gettable_parameter(22)
        self.assertEqual(settable_gettable_parameter(), 22)

    def test_str_representation(self):
        # three cases where only name gets used for full_name
        for instrument in blank_instruments:
            p = Parameter(name='fred')
            p._instrument = instrument
            self.assertEqual(str(p), 'fred')

        # and finally an instrument that really has a name
        p = Parameter(name='wilma')
        p._instrument = named_instrument
        self.assertEqual(str(p), 'astro_wilma')

    def test_bad_validator(self):
        with self.assertRaises(TypeError):
            Parameter('p', vals=[1, 2, 3])

    def test_step_ramp(self):
        p = MemoryParameter(name='test_step')
        p(42)
        self.assertListEqual(p.set_values, [42])
        p.step = 1

        self.assertListEqual(p.get_ramp_values(44.5, 1), [43, 44, 44.5])

        p(44.5)
        self.assertListEqual(p.set_values, [42, 43, 44, 44.5])

    def test_scale_raw_value(self):
        p = Parameter(name='test_scale_raw_value', set_cmd=None)
        p(42)
        self.assertEqual(p.raw_value, 42)

        p.scale = 2
        self.assertEqual(p.raw_value, 42)  # No set/get cmd performed
        self.assertEqual(p(), 21)

        p(10)
        self.assertEqual(p.raw_value, 20)
        self.assertEqual(p(), 10)

    # There are a number of different scenarios for testing a parameter with
    # scale and offset, so a custom strategy for generating test parameters
    # is implemented here. The possible cases are:
    # for getting and setting a parameter: values can be
    #    scalar:
    #        offset and scale can be scalars
    # for getting only:
    #    array:
    #        offset and scale can independently be scalars or arrays
    #        (of the same length as the values)

    # define shorthands for strategies
    TestFloats = hst.floats(min_value=-1e40,
                            max_value=1e40).filter(lambda x: x != 0)
    SharedSize = hst.shared(hst.integers(min_value=1, max_value=100),
                            key='shared_size')
    ValuesScalar = hst.shared(hst.booleans(), key='values_scalar')

    # The following strategy draws values, scales and offsets that are
    # either scalars or consistently sized lists.
    @hst.composite
    def iterable_or_number(draw, values, size, values_scalar, is_values):
        if draw(values_scalar):
            # if parameter values are scalar, return scalar for values and scale/offset
            return draw(values)
        elif is_values:
            # if parameter values are not scalar and parameter values are requested
            # return a list of values of the given size
            return draw(
                hst.lists(values, min_size=draw(size), max_size=draw(size)))
        else:
            # if parameter values are not scalar and scale/offset are requested
            # make a random choice whether to return a list of the same size as the values
            # or a simple scalar
            if draw(hst.booleans()):
                return draw(
                    hst.lists(values, min_size=draw(size),
                              max_size=draw(size)))
            else:
                return draw(values)

    @settings(max_examples=500)  # default:100 increased
    @given(values=iterable_or_number(TestFloats, SharedSize, ValuesScalar,
                                     True),
           offsets=iterable_or_number(TestFloats, SharedSize, ValuesScalar,
                                      False),
           scales=iterable_or_number(TestFloats, SharedSize, ValuesScalar,
                                     False))
    def test_scale_and_offset_raw_value_iterable(self, values, offsets,
                                                 scales):
        p = Parameter(name='test_scale_and_offset_raw_value', set_cmd=None)

        # test that scale and offset does not change the default behaviour
        p(values)
        self.assertEqual(p.raw_value, values)

        # test setting scale and offset does not change anything
        p.scale = scales
        p.offset = offsets
        self.assertEqual(p.raw_value, values)

        np_values = np.array(values)
        np_offsets = np.array(offsets)
        np_scales = np.array(scales)
        np_get_values = np.array(p())
        np.testing.assert_allclose(np_get_values, (np_values - np_offsets) /
                                   np_scales)  # No set/get cmd performed

        # test set, only for scalar values
        if not isinstance(values, Iterable):
            p(values)
            np.testing.assert_allclose(np.array(p.raw_value),
                                       np_values * np_scales +
                                       np_offsets)  # No set/get cmd performed

            # testing conversion back and forth
            p(values)
            np_get_values = np.array(p())
            np.testing.assert_allclose(np_get_values,
                                       np_values)  # No set/get cmd performed

        # adding statistics
        if isinstance(offsets, Iterable):
            event('Offset is array')
        if isinstance(scales, Iterable):
            event('Scale is array')
        if isinstance(values, Iterable):
            event('Value is array')
        if isinstance(scales, Iterable) and isinstance(offsets, Iterable):
            event('Scale is array and also offset')
        if isinstance(scales, Iterable) and not isinstance(offsets, Iterable):
            event('Scale is array but not offset')

    @given(scale=hst.integers(1, 100),
           value=hst.floats(min_value=1e-9, max_value=10))
    def test_ramp_scaled(self, scale, value):
        start_point = 0.0
        p = MemoryParameter(name='p', scale=scale, initial_value=start_point)
        assert p() == start_point
        # first set a step size
        p.step = 0.1
        # and a wait time
        p.inter_delay = 1e-9  # in seconds
        first_step = 1.0
        second_step = 10.0
        # do a step to start from a non zero starting point where
        # scale matters
        p.set(first_step)
        np.testing.assert_allclose(np.array([p.get()]), np.array([first_step]))

        expected_raw_steps = np.linspace(start_point * scale,
                                         first_step * scale, 11)
        # getting the raw values that are actually sent to the instrument.
        # these are scaled in the set_wrapper
        np.testing.assert_allclose(np.array(p.set_values), expected_raw_steps)
        assert p.raw_value == first_step * scale
        # then check the generated steps. They should not be scaled as the
        # scaling happens when setting them
        expected_steps = np.linspace(first_step + p.step, second_step, 90)
        np.testing.assert_allclose(p.get_ramp_values(second_step, p.step),
                                   expected_steps)
        p.set(10)
        np.testing.assert_allclose(np.array(p.set_values),
                                   np.linspace(0.0 * scale, 10 * scale, 101))
        p.set(value)
        np.testing.assert_allclose(p.get(), value)
        assert p.raw_value == value * scale

    @given(value=hst.floats(min_value=1e-9, max_value=10))
    def test_ramp_parser(self, value):
        start_point = 0.0
        p = MemoryParameter(name='p',
                            set_parser=lambda x: -x,
                            get_parser=lambda x: -x,
                            initial_value=start_point)
        assert p() == start_point
        # first set a step size
        p.step = 0.1
        # and a wait time
        p.inter_delay = 1e-9  # in seconds
        first_step = 1.0
        second_step = 10.0
        # do a step to start from a non zero starting point where
        # scale matters
        p.set(first_step)
        assert p.get() == first_step
        assert p.raw_value == -first_step
        np.testing.assert_allclose(np.array([p.get()]), np.array([first_step]))

        expected_raw_steps = np.linspace(-start_point, -first_step, 11)
        # getting the raw values that are actually sent to the instrument.
        # these are parsed in the set_wrapper
        np.testing.assert_allclose(np.array(p.set_values), expected_raw_steps)
        assert p.raw_value == -first_step
        # then check the generated steps. They should not be parsed as the
        # scaling happens when setting them
        expected_steps = np.linspace((first_step + p.step), second_step, 90)
        np.testing.assert_allclose(p.get_ramp_values(second_step, p.step),
                                   expected_steps)
        p.set(second_step)
        np.testing.assert_allclose(
            np.array(p.set_values), np.linspace(-start_point, -second_step,
                                                101))
        p.set(value)
        np.testing.assert_allclose(p.get(), value)
        assert p.raw_value == -value

    @given(scale=hst.integers(1, 100),
           value=hst.floats(min_value=1e-9, max_value=10))
    def test_ramp_parsed_scaled(self, scale, value):
        start_point = 0.0
        p = MemoryParameter(name='p',
                            scale=scale,
                            set_parser=lambda x: -x,
                            get_parser=lambda x: -x,
                            initial_value=start_point)
        assert p() == start_point
        # first set a step size
        p.step = 0.1
        # and a wait time
        p.inter_delay = 1e-9  # in seconds
        first_step = 1.0
        second_step = 10.0
        p.set(first_step)
        assert p.get() == first_step
        assert p.raw_value == -first_step * scale
        expected_raw_steps = np.linspace(-start_point * scale,
                                         -first_step * scale, 11)
        # getting the raw values that are actually sent to the instrument.
        # these are parsed in the set_wrapper
        np.testing.assert_allclose(np.array(p.set_values), expected_raw_steps)
        assert p.raw_value == -scale * first_step
        expected_steps = np.linspace(first_step + p.step, second_step, 90)
        np.testing.assert_allclose(p.get_ramp_values(10, p.step),
                                   expected_steps)
        p.set(second_step)
        np.testing.assert_allclose(
            np.array(p.set_values),
            np.linspace(-start_point * scale, -second_step * scale, 101))
        p.set(value)
        np.testing.assert_allclose(p.get(), value)
        assert p.raw_value == -scale * value
Example #9
def dtype_for_elements_strategy(s):
    return st.shared(
        s.map(lambda x: pandas.Series([x]).dtype),
        key=('hypothesis.extra.pandas.dtype_for_elements_strategy', s),
    )
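Note the tuple key: it includes the source strategy s, so each distinct elements strategy gets its own shared dtype instead of all colliding on one value. A usage sketch (hypothetical names):

import hypothesis.strategies as st

ints_dtype = dtype_for_elements_strategy(st.integers())
floats_dtype = dtype_for_elements_strategy(st.floats())
# Within one test case, repeated draws of ints_dtype agree with each other
# but are independent of floats_dtype: the strategy object is part of the key.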
Example #10
        meta.extend([dsp.iloc[i, :].tolist()] * n)
    meta = pd.DataFrame(meta, columns=dsp.columns)

    p = sum(nreps)
    n = draw(nobs)
    observation = st.lists(observations, min_size=p, max_size=p)

    time = draw(st.lists(time, min_size=n, max_size=n, unique=True))
    data = draw(st.lists(observation, min_size=n, max_size=n))

    data = pd.DataFrame([[t] + d for t, d in zip(time, data)])

    return popmachine.DataSet(data, meta)


sharedDesignSpace = st.shared(designSpace(), key='incomplete-designspace')


@st.composite
def compendia(draw, designspace=sharedDesignSpace,
              nrep=st.integers(min_value=0, max_value=3),
              nobs=st.integers(min_value=1, max_value=50),
              observations=st.floats(),
              time=st.floats(allow_infinity=False, allow_nan=False)):
    """Create a list of datasets all created from a shared design space."""

    n = draw(st.integers(min_value=2, max_value=5))
    ds = dataset(designspace, nrep, nobs, observations, time)
    datasets = draw(st.lists(ds, min_size=n, max_size=n))
    names = draw(st.lists(charstring, min_size=n, max_size=n, unique=True))

    return names, datasets
Example #11
def test_different_instances_are_not_shared():
    find_any(
        st.tuples(st.shared(st.integers()), st.shared(st.integers())),
        lambda x: x[0] != x[1]
    )
Example #12
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER

from __future__ import absolute_import, division, print_function

import hypothesis.strategies as st
from hypothesis import given
from tests.common.debug import find_any, minimal

x = st.shared(st.integers())


@given(x, x)
def test_sharing_is_by_instance_by_default(a, b):
    assert a == b


@given(st.shared(st.integers(), key="hi"), st.shared(st.integers(), key="hi"))
def test_different_instances_with_the_same_key_are_shared(a, b):
    assert a == b


def test_different_instances_are_not_shared():
    find_any(
        st.tuples(st.shared(st.integers()), st.shared(st.integers())),
Example #13
class TestParseLine:
    @given(
        st.shared(
            st.text(min_size=1).map(lambda tkn: tkn.encode("utf8")),
            key="parse-line-token",
        ),
        (st.shared(
            st.text(min_size=1).map(lambda tkn: tkn.encode("utf8")),
            key="parse-line-token",
        ).flatmap(lambda tkn: st.binary().filter(lambda ln: b"\n" not in ln and
                                                 not ln.startswith(tkn)))),
    )
    def test_invalid_token_skips(self, token, line):
        assert parse_line(line, token=token) is None

    @given(
        st.none() | st.text(min_size=1).map(lambda tkn: tkn.encode("utf8")),
        st.binary().filter(lambda ln: b"\n" not in ln),
    )
    def test_unparseable_syslog(self, caplog, token, line):
        if token is not None:
            line = token + line

        caplog.clear()

        assert parse_line(line, token=token) is None
        assert caplog.record_tuples == [("linehaul.server", logging.ERROR, ANY)
                                        ]
        assert caplog.record_tuples[0][2].startswith(
            "Unparseable syslog message")

    @given(
        st.none() | st.text(min_size=1).map(lambda tkn: tkn.encode("utf8")),
        st.binary().filter(lambda ln: b"\n" not in ln).map(
            lambda d:
            b"<134>2018-07-20T02:19:20Z cache-itm18828 linehaul[411617]: " + d
        ),
    )
    def test_unparseable_event(self, caplog, token, line):
        if token is not None:
            line = token + line

        caplog.clear()

        assert parse_line(line, token=token) is None
        assert caplog.record_tuples == [("linehaul.server", logging.ERROR, ANY)
                                        ]
        assert caplog.record_tuples[0][2].startswith("Unparseable event:")

    @pytest.mark.parametrize(
        ("event", "exception"),
        [(
            ("2@Fri, 20 Jul 2018 02:19:19 GMT|JP|/packages/ba/c8/"
             "a928c55457441c87366eb2423efca9aa0f46380994fd8a476153493c319a/"
             "cfn_flip-1.0.3.tar.gz|TLSv1.2|ECDHE-RSA-AES128-GCM-SHA256|"
             "(null)|1.0.3|sdist|bandersnatch/2.2.1 "
             "(cpython 3.7.0-final0, Darwin x86_64)"),
            TypeError,
        )],
    )
    def test_parsing_raises(self, caplog, event, exception):
        line = "<134>2018-07-20T02:19:20Z cache-itm18828 linehaul[411617]: " + event

        assert parse_line(line.encode("utf8")) is None
        assert caplog.record_tuples == [("linehaul.server", logging.ERROR,
                                         "Unhandled error:")]

    def test_returns_download_event(self):
        event = (
            "2@Fri, 20 Jul 2018 02:19:19 GMT|JP|/packages/ba/c8/"
            "a928c55457441c87366eb2423efca9aa0f46380994fd8a476153493c319a/"
            "cfn_flip-1.0.3.tar.gz|TLSv1.2|ECDHE-RSA-AES128-GCM-SHA256|"
            "cfn-flip|1.0.3|sdist|"
            "bandersnatch/2.2.1 (cpython 3.7.0-final0, Darwin x86_64)")
        line = "<134>2018-07-20T02:19:20Z cache-itm18828 linehaul[411617]: " + event

        expected = _cattr.structure(
            {
                "country_code":
                "JP",
                "details": {
                    "installer": {
                        "name": "bandersnatch",
                        "version": "2.2.1"
                    }
                },
                "file": {
                    "filename": "cfn_flip-1.0.3.tar.gz",
                    "project": "cfn-flip",
                    "type": "sdist",
                    "version": "1.0.3",
                },
                "timestamp":
                "Fri, 20 Jul 2018 02:19:19 GMT",
                "tls_cipher":
                "ECDHE-RSA-AES128-GCM-SHA256",
                "tls_protocol":
                "TLSv1.2",
                "url":
                ("/packages/ba/c8/a928c55457441c87366eb2423efca9aa0f4638099"
                 "4fd8a476153493c319a/cfn_flip-1.0.3.tar.gz"),
            },
            Download,
        )

        assert parse_line(line.encode("utf8")) == expected
Example #14
                "TLSv1.2",
                "url":
                ("/packages/ba/c8/a928c55457441c87366eb2423efca9aa0f4638099"
                 "4fd8a476153493c319a/cfn_flip-1.0.3.tar.gz"),
            },
            Download,
        )

        assert parse_line(line.encode("utf8")) == expected


@given(
    st.builds(
        Download,
        timestamp=st.shared(
            st.dates(),
            key="extract-item-data").map(lambda i: arrow.Arrow.fromdate(i)),
    ),
    st.shared(st.dates(), key="extract-item-data").map(
        lambda i: f"{i.year:04}{i.month:02}{i.day:02}"),
)
def test_extract_item_data(download, expected):
    assert extract_item_date(download) == expected


@given(
    st.lists(
        st.builds(Download,
                  timestamp=st.dates().map(lambda i: arrow.Arrow.fromdate(i))))
)
def test_compute_batches(events):
Example #15
BYTES_LOOKUP = {
    sre.CATEGORY_DIGIT: BYTES_DIGIT,
    sre.CATEGORY_SPACE: BYTES_SPACE,
    sre.CATEGORY_WORD: BYTES_WORD,
    sre.CATEGORY_NOT_DIGIT: BYTES_ALL - BYTES_DIGIT,
    sre.CATEGORY_NOT_SPACE: BYTES_ALL - BYTES_SPACE,
    sre.CATEGORY_NOT_WORD: BYTES_ALL - BYTES_WORD,
}

# On Python < 3.4 (including 2.7), the following unicode chars are weird.
# They are matched by the \W, meaning 'not word', but unicodedata.category(c)
# returns one of the word categories above.  There's special handling below.
HAS_WEIRD_WORD_CHARS = sys.version_info[:2] < (3, 4)
UNICODE_WEIRD_NONWORD_CHARS = set(u'\U00012432\U00012433\U00012456\U00012457')

GROUP_CACHE_STRATEGY = st.shared(st.builds(dict),
                                 key='hypothesis.regex.group_cache')


@st.composite
def update_group(draw, group_name, strategy):
    cache = draw(GROUP_CACHE_STRATEGY)
    result = draw(strategy)
    cache[group_name] = result
    return result


@st.composite
def reuse_group(draw, group_name):
    cache = draw(GROUP_CACHE_STRATEGY)
    try:
        return cache[group_name]
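A condensed sketch of the shared-cache pattern above: st.shared(st.builds(dict), key=...) yields one mutable dict per test case, which composite strategies can use to pass state between draws (hypothetical names):

import hypothesis.strategies as st

CACHE = st.shared(st.builds(dict), key="sketch.group_cache")

@st.composite
def remember(draw, name, strategy):
    cache = draw(CACHE)  # the same dict for every draw in one test case
    cache[name] = draw(strategy)
    return cache[name]

@st.composite
def recall(draw, name, default=""):
    # assumes a remember(name, ...) draw happened earlier in the test case
    return draw(CACHE).get(name, default)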
Example #16
 def mutate(self, draw: Draw) -> Schema:
     # On the top level, Schemathesis creates "object" schemas for all parameter "in" values except "body", which is
     # taken as-is. Therefore we can only apply mutations that won't change the Open API semantics of the schema.
     mutations: List[Mutation]
     if self.location in ("header", "cookie", "query"):
         # These objects follow this pattern:
         # {
         #     "properties": properties,
         #     "additionalProperties": False,
         #     "type": "object",
         #     "required": required
         # }
         # Open API semantics expect mapping; therefore, they should have the "object" type.
         # We can:
         #   - remove required parameters
         #   - negate constraints (only `additionalProperties` in this case)
         #   - mutate individual properties
         mutations = draw(
             ordered((remove_required_property, negate_constraints,
                      change_properties)))
     elif self.is_path_location:
         # The same as above, but we can only mutate individual properties as their names are predefined in the
         # path template, and all of them are required.
         mutations = [change_properties]
     else:
         # Body can be of any type and does not have any specific type semantic.
         mutations = draw(ordered(get_mutations(draw, self.keywords)))
     # Deep copy all keywords to avoid modifying the original schema
     new_schema = deepcopy(self.keywords)
     enabled_mutations = draw(st.shared(FeatureStrategy(),
                                        key="mutations"))  # type: ignore
     result = MutationResult.FAILURE
     for mutation in mutations:
         if enabled_mutations.is_enabled(mutation.__name__):
             result |= mutation(self, draw, new_schema)
     if result == MutationResult.FAILURE:
         # If we failed to apply anything, then reject the whole case
         reject()  # type: ignore
     new_schema.update(self.non_keywords)
     if self.is_header_location:
         # All headers should have names that can be sent over network
         new_schema["propertyNames"] = {
             "type": "string",
             "format": "_header_name"
         }
         for sub_schema in new_schema.get("properties", {}).values():
             sub_schema["type"] = "string"
             if len(sub_schema) == 1:
                 sub_schema["format"] = "_header_value"
         if draw(st.booleans()):
             # In headers, `additionalProperties` are False by default, which means that Schemathesis won't generate
             # any headers that are not defined. This change adds the possibility of generating valid extra headers
             new_schema["additionalProperties"] = {
                 "type": "string",
                 "format": "_header_value"
             }
     # Empty array or objects may match the original schema
     if "array" in get_type(new_schema) and new_schema.get(
             "items") and "minItems" not in new_schema.get("not", {}):
         new_schema.setdefault("minItems", 1)
     if ("object" in get_type(new_schema) and new_schema.get("properties")
             and "minProperties" not in new_schema.get("not", {})):
         new_schema.setdefault("minProperties", 1)
     return new_schema
Example #17
BYTES_WORD = {b for b in BYTES_ALL if re.match(b"\\w", b)}
BYTES_LOOKUP = {
    sre.CATEGORY_DIGIT: BYTES_DIGIT,
    sre.CATEGORY_SPACE: BYTES_SPACE,
    sre.CATEGORY_WORD: BYTES_WORD,
    sre.CATEGORY_NOT_DIGIT: BYTES_ALL - BYTES_DIGIT,
    sre.CATEGORY_NOT_SPACE: BYTES_ALL - BYTES_SPACE,
    sre.CATEGORY_NOT_WORD: BYTES_ALL - BYTES_WORD,
}

# On Python 2, these unicode chars are matched by \W, meaning 'not word',
# but unicodedata.category(c) returns one of the word categories above.
UNICODE_WEIRD_NONWORD_CHARS = set(u"\U00012432\U00012433\U00012456\U00012457")


GROUP_CACHE_STRATEGY = st.shared(st.builds(dict), key="hypothesis.regex.group_cache")


@st.composite
def update_group(draw, group_name, strategy):
    cache = draw(GROUP_CACHE_STRATEGY)
    result = draw(strategy)
    cache[group_name] = result
    return result


@st.composite
def reuse_group(draw, group_name):
    cache = draw(GROUP_CACHE_STRATEGY)
    try:
        return cache[group_name]
Example #18
def test_different_instances_are_not_shared():
    find(
        st.tuples(st.shared(st.integers()), st.shared(st.integers())),
        lambda x: x[0] != x[1]
    )
Example #19
).map(join_tuple)

malformed_type_strs = st.one_of(
    malformed_non_tuple_type_strs,
    malformed_tuple_type_strs,
)


#################################
# Type string w/data strategies #
#################################

MIN_LIST_SIZE = 1
MAX_LIST_SIZE = 8

uint_total_bits = st.shared(total_bits, key='uint_total_bits')
uint_strs = uint_total_bits.map('uint{}'.format)
uint_values = uint_total_bits.flatmap(lambda n: st.integers(
    min_value=0,
    max_value=2 ** n - 1,
))

int_total_bits = st.shared(total_bits, key='int_total_bits')
int_strs = int_total_bits.map('int{}'.format)
int_values = int_total_bits.flatmap(lambda n: st.integers(
    min_value=-2 ** (n - 1),
    max_value=2 ** (n - 1) - 1,
))


def scale_places(places):
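The shared key above ties the bit width used to build the type string to the width used to bound the generated value. A usage sketch, under the assumption that total_bits is a strategy over valid ABI widths:

from hypothesis import given, strategies as st

total_bits = st.sampled_from(range(8, 264, 8))  # hypothetical widths
uint_total_bits = st.shared(total_bits, key='uint_total_bits')
uint_strs = uint_total_bits.map('uint{}'.format)
uint_values = uint_total_bits.flatmap(lambda n: st.integers(
    min_value=0, max_value=2 ** n - 1))

@given(type_str=uint_strs, value=uint_values)
def test_value_fits_declared_width(type_str, value):
    n = int(type_str[len('uint'):])
    assert 0 <= value < 2 ** n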
Example #20
def test_simplify_shared_linked_to_size():
    xs = find(st.lists(st.shared(st.integers())), lambda t: sum(t) >= 1000)
    assert sum(xs[:-1]) < 1000
    assert (xs[0] - 1) * len(xs) < 1000
Example #21
        embedding = embedding_object.embedding
        labels = embedding_object.labels
        neighbors = embed.get_neighbors(embedding, labels, n_neighbors=self.n_neighbors, algorithm='brute', metric=metric)
        self.neighbors = neighbors

        ## add to existing metadata
        metadata = embedding_object.metadata
        metadata["k-nn Metric"] = metric
        metadata["Number of Neighbors"] = str(self.n_neighbors)
        metadata["Other Neighbors Information"] = other_info
        self.metadata = metadata


EmbeddingStrategy = st.builds(Embedding,
                              st.data(),
                              st.shared(st.lists(elements=st.text(average_size=10, min_size=1), min_size=7, max_size=200, unique=True), key='shared_labels'),
                              st.text(average_size=10),
                              st.text(average_size=10),
                              st.text(average_size=10),
                              st.integers(min_value=0, max_value=1000))

NeighborsStrategy = st.builds(Neighbors,
                              EmbeddingStrategy,
                              st.data(),
                              st.text(max_size=20),
                              st.sampled_from(KNN_METRICS))


def comparison_equality(comparison1, comparison2):
    """
    Equality test for the outputs of compare_neighbors.
Example #22
        output_string = output_string[:range_start] + substitution + output_string[range_end:]

    return output_string


@_strategies.cacheable
def perturbed_strings(string_strategy):
    """
    A strategy that constructs a string using the supplied ``string_strategy``,
    and then perturbs it.
    """
    return perturbed_by_character(string_strategy) | perturbed_by_subsection(string_strategy)


@given(
    key_type=strategies.shared(strategies.sampled_from(KEY_TYPES), key="key_type"),
    serialized=strategies.shared(valid_key_string(), key="diff_serial_diff_key"),
    perturbed=perturbed_strings(strategies.shared(valid_key_string(), key="diff_serial_diff_key")),
)
@example(
    key_type=DefinitionKey,
    serialized='def-v1:000000000000000000000000+type@-',
    perturbed='def-v1:00000000000000000000000+type@-',
)
@example(
    key_type=CourseKey,
    serialized=u'library-v1:-+-+branch@-+version@000000000000000000000000',
    perturbed=u'library-v1:-+-+branch@-+versIon@000000000000000000000000',
)
@example(
    key_type=DefinitionKey,
Example #23

# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER

from __future__ import absolute_import, division, print_function

import hypothesis.strategies as st
from hypothesis import given
from tests.common.debug import find_any, minimal

x = st.shared(st.integers())


@given(x, x)
def test_sharing_is_by_instance_by_default(a, b):
    assert a == b


@given(st.shared(st.integers(), key="hi"), st.shared(st.integers(), key="hi"))
def test_different_instances_with_the_same_key_are_shared(a, b):
    assert a == b


def test_different_instances_are_not_shared():
    find_any(
        st.tuples(st.shared(st.integers()), st.shared(st.integers())),
Example #24
    data_stack_depth=data_stack_depths(with_room_for_values=2), tos=numbers, nos=numbers
)
def test_minus(emulator, data_stack, data_stack_depth, tos, nos):
    # Arrange
    data_stack.set_depth_in_bytes(data_stack_depth)
    data_stack.push_word(nos)
    data_stack.push_word(tos)
    # Act
    _do_test_thread(emulator, "forth.core.-")
    # Assert
    assert (nos - tos) & 0xFFFF == data_stack.pop_u16()
    assert data_stack_depth == len(data_stack)


# Strategies that might generate equal values (if I understand correctly)
values = shared(numbers)
maybe_equal = values.flatmap(lambda value: one_of(just(value), numbers))


@given(
    data_stack_depth=data_stack_depths(with_room_for_values=2),
    tos=values,
    nos=maybe_equal,
)
def test_equals(emulator, data_stack, data_stack_depth, tos, nos):
    # Arrange
    data_stack.set_depth_in_bytes(data_stack_depth)
    data_stack.push_word(nos)
    data_stack.push_word(tos)
    # Act
    _do_test_thread(emulator, "forth.core.=")
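The values/maybe_equal pair above can produce both equal and unequal operands within the same strategy. A standalone sketch (numbers is assumed to be an integer strategy; hypothetical 16-bit words here), using hypothesis.event to observe both outcomes:

from hypothesis import given, event
from hypothesis.strategies import integers, just, one_of, shared, tuples

numbers = integers(0, 0xFFFF)  # hypothetical 16-bit word values
values = shared(numbers)
maybe_equal = values.flatmap(lambda value: one_of(just(value), numbers))

@given(tuples(values, maybe_equal))
def test_pairs_cover_both_outcomes(pair):
    event("equal" if pair[0] == pair[1] else "unequal")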
Example #25
    max_leaves=5,
).map(join_tuple)

malformed_type_strs = st.one_of(
    malformed_non_tuple_type_strs,
    malformed_tuple_type_strs,
)

#################################
# Type string w/data strategies #
#################################

MIN_LIST_SIZE = 1
MAX_LIST_SIZE = 8

uint_total_bits = st.shared(total_bits, key='uint_total_bits')
uint_strs = uint_total_bits.map('uint{}'.format)
uint_values = uint_total_bits.flatmap(lambda n: st.integers(
    min_value=0,
    max_value=2**n - 1,
))

int_total_bits = st.shared(total_bits, key='int_total_bits')
int_strs = int_total_bits.map('int{}'.format)
int_values = int_total_bits.flatmap(lambda n: st.integers(
    min_value=-2**(n - 1),
    max_value=2**(n - 1) - 1,
))


def scale_places(places):
Example #26
def from_typing_type(thing):
    # We start with special-case support for Union and Tuple - the latter
    # isn't actually a generic type.  Support for Callable may be added to
    # this section later.
    # We then explicitly error on non-Generic types, which don't carry enough
    # information to sensibly resolve to strategies at runtime.
    # Finally, we run a variation of the subclass lookup in st.from_type
    # among generic types in the lookup.
    import typing
    # Under 3.6 Union is handled directly in st.from_type, as the argument is
    # not an instance of `type`. However, under Python 3.5 Union *is* a type
    # and we have to handle it here, including failing if it has no parameters.
    if hasattr(thing, '__union_params__'):  # pragma: no cover
        args = sorted(thing.__union_params__ or (), key=type_sorting_key)
        if not args:
            raise ResolutionFailed('Cannot resolve Union of no types.')
        return st.one_of([st.from_type(t) for t in args])
    if getattr(thing, '__origin__', None) == tuple or \
            isinstance(thing, getattr(typing, 'TupleMeta', ())):
        elem_types = getattr(thing, '__tuple_params__', None) or ()
        elem_types += getattr(thing, '__args__', None) or ()
        if getattr(thing, '__tuple_use_ellipsis__', False) or \
                len(elem_types) == 2 and elem_types[-1] is Ellipsis:
            return st.lists(st.from_type(elem_types[0])).map(tuple)
        elif len(elem_types) == 1 and elem_types[0] == ():
            return st.tuples()  # Empty tuple; see issue #1583
        return st.tuples(*map(st.from_type, elem_types))
    if isinstance(thing, typing.TypeVar):
        if getattr(thing, '__bound__', None) is not None:
            return st.from_type(thing.__bound__)
        if getattr(thing, '__constraints__', None):
            return st.shared(st.sampled_from(thing.__constraints__),
                             key='typevar-with-constraint').flatmap(
                                 st.from_type)
        # Constraints may be None or () on various Python versions.
        return st.text()  # An arbitrary type for the typevar
    # Now, confirm that we're dealing with a generic type as we expected
    if not isinstance(thing, typing_root_type):  # pragma: no cover
        raise ResolutionFailed('Cannot resolve %s to a strategy' % (thing, ))
    # Parametrised generic types have their __origin__ attribute set to the
    # un-parametrised version, which we need to use in the subclass checks.
    # e.g.:     typing.List[int].__origin__ == typing.List
    mapping = {
        k: v
        for k, v in _global_type_lookup.items()
        if isinstance(k, typing_root_type) and try_issubclass(k, thing)
    }
    if typing.Dict in mapping:
        # The subtype relationships between generic and concrete View types
        # are sometimes inconsistent under Python 3.5, so we pop them out to
        # preserve our invariant that all examples of from_type(T) are
        # instances of type T - and simplify the strategy for abstract types
        # such as Container
        for t in (typing.KeysView, typing.ValuesView, typing.ItemsView):
            mapping.pop(t, None)
    strategies = [
        v if isinstance(v, st.SearchStrategy) else v(thing)
        for k, v in mapping.items() if sum(
            try_issubclass(k, T) for T in mapping) == 1
    ]
    empty = ', '.join(repr(s) for s in strategies if s.is_empty)
    if empty or not strategies:  # pragma: no cover
        raise ResolutionFailed(
            'Could not resolve %s to a strategy; consider using '
            'register_type_strategy' % (empty or thing, ))
    return st.one_of(strategies)
Example #27
def chromeReqResp ():
    # XXX: will this generate the same url for all testcases?
    reqid = st.shared (st.text (), 'reqresp')
    url = st.shared (urlsStr (), 'reqresp')
    return st.tuples (chromeRequestWillBeSent (reqid, url),
            chromeResponseReceived (reqid, url))
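Note that st.shared ties values by key, not by variable name: both draws above use the key 'reqresp', so within one test case reqid and url would resolve to the same underlying value (whichever is drawn first). A sketch of what was presumably intended, using distinct keys (urlsStr and the chrome* factories are assumed from the surrounding module):

import hypothesis.strategies as st

def chromeReqResp():
    # distinct keys: id and url vary independently, but each stays consistent
    # between the request and response events of a single test case
    reqid = st.shared(st.text(), key='reqresp-id')
    url = st.shared(urlsStr(), key='reqresp-url')
    return st.tuples(chromeRequestWillBeSent(reqid, url),
                     chromeResponseReceived(reqid, url))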
Example #28
def dtype_for_elements_strategy(s):
    return st.shared(
        s.map(lambda x: pandas.Series([x]).dtype),
        key=("hypothesis.extra.pandas.dtype_for_elements_strategy", s),
    )
Example #29

types = [None, "a", "b", "c"]


@composite
def fakefiles_(draw, *, files=None, minfiles=0, maxfiles=40):
    if files is None:
        files = lists(tuples(kind_floats(min_value=-5, max_value=5),
                             sampled_from(types)),
                      max_size=maxfiles,
                      min_size=minfiles)
    info = draw(files, label="file values and types")
    res = FakeFiles(draw, info)
    return res


fakefiles = lambda *a, **kw: shared(fakefiles_(*a, **kw), key="files")


@composite
def make_stats_(draw, *, minholdout=0, **kwargs):
    # TODO: reuse ratio? more realistic distribution
    stats = state.Stats()
    kwargs["minfiles"] = kwargs.get("minfiles", 0) + minholdout
    kwargs["maxfiles"] = kwargs.get("minfiles", 40) + minholdout
    files = draw(fakefiles(**kwargs))
    comparisons_per_file = draw(integers(min_value=0, max_value=7),
                                label="comparisons per file")
    assert len(files.af) - minholdout >= 0
    compared_files = draw(integers(min_value=0,
                                   max_value=len(files.af) - minholdout),
                          label="files to use")
Example #30
def generate_tuples_of_arrays(
    draw,
    dtype_strategy=ints_floats_datetimes_and_timedeltas(),
    max_tuple_length=30,
    max_array_size=200,
    all_same_width=False,
    all_same_height=False,
):
    shape_type = draw(integers(
        min_value=0,
        max_value=4))  # 0: (1,1), 1: (x,), 2: (x,1), 3: (1,x), 4: (x,y)
    if shape_type == 1 and all_same_height:
        shape_type = 3
    dtype = shared(dtype_strategy)
    # NOTE: shape=(height, width) or shape=(width,)
    starting_size = (
        draw(integers(min_value=2, max_value=max_array_size)),
        draw(integers(min_value=2, max_value=max_array_size)),
    )
    arr_list = []
    arr_length = draw(shared(integers(min_value=1,
                                      max_value=max_tuple_length)))
    if shape_type == 0:  # (1,1)
        for i in range(arr_length):
            arr_list.append(draw(arrays(dtype=dtype, shape=(1, 1))))
    elif shape_type == 1:  # (x,)
        if all_same_width:
            for i in range(arr_length):
                arr_list.append(
                    draw(arrays(dtype=dtype, shape=(starting_size[1], ))))
        else:
            for i in range(arr_length):
                x = draw(integers(min_value=1, max_value=max_array_size))
                arr_list.append(draw(arrays(dtype=dtype, shape=(x, ))))
    elif shape_type == 2:  # (x,1)
        if all_same_height:
            for i in range(arr_length):
                arr_list.append(
                    draw(arrays(dtype=dtype, shape=(starting_size[0], 1))))
        else:
            for i in range(arr_length):
                x = draw(integers(min_value=1, max_value=max_array_size))
                arr_list.append(draw(arrays(dtype=dtype, shape=(x, 1))))
    elif shape_type == 3:  # (1,x)
        if all_same_width:
            for i in range(arr_length):
                arr_list.append(
                    draw(arrays(dtype=dtype, shape=(1, starting_size[1]))))
        else:
            for i in range(arr_length):
                x = draw(integers(min_value=1, max_value=max_array_size))
                arr_list.append(draw(arrays(dtype=dtype, shape=(1, x))))
    elif shape_type == 4:  # (x,y)
        if all_same_width and all_same_height:
            for i in range(arr_length):
                arr_list.append(
                    draw(
                        arrays(dtype=dtype,
                               shape=(starting_size[0], starting_size[1]))))
        elif all_same_height:
            for i in range(arr_length):
                x = draw(integers(min_value=1, max_value=max_array_size))
                arr_list.append(
                    draw(arrays(dtype=dtype, shape=(starting_size[0], x))))
        elif all_same_width:
            for i in range(arr_length):
                x = draw(integers(min_value=1, max_value=max_array_size))
                arr_list.append(
                    draw(arrays(dtype=dtype, shape=(x, starting_size[1]))))
        else:
            for i in range(arr_length):
                x = draw(integers(min_value=1, max_value=max_array_size))
                y = draw(integers(min_value=1, max_value=max_array_size))
                arr_list.append(draw(arrays(dtype=dtype, shape=(x, y))))

    return tuple(arr_list)
Example #31
from functools import reduce

from hypothesis import given
import hypothesis.strategies as st
import hypothesis.extra.numpy as hnp
import numpy as np
import pytest

import qsim.gate

# -- Strategies for generating values --

n_qubits = st.shared(st.integers(min_value=1, max_value=6))


# Choose which qubits from 'n_qubits' to operate on with a gate that
# operates on 'gate_size' qubits
def select_n_qubits(gate_size):
    def _strat(n_qubits):
        assert n_qubits >= gate_size
        possible_qubits = st.integers(0, n_qubits - 1)
        return st.lists(possible_qubits, min_size=gate_size,
                        max_size=gate_size, unique=True).map(tuple)

    return _strat


valid_complex = st.complex_numbers(allow_infinity=False, allow_nan=False)
phases = st.floats(min_value=0,
                   max_value=2 * np.pi,
                   allow_nan=False,
Example #32
def generate_ndarrays(draw):
    shape = draw(shared(one_darray_shape_strategy(max_shape_size=10)))
    dtype = draw(one_of_supported_dtypes())
    arr1 = draw(arrays(shape=shape, dtype=dtype, unique=True))
    arr2 = draw(arrays(shape=shape, dtype=dtype, unique=True))
    return (arr1, arr2)
Example #33
 def _handle_state(self, state):
     opcode, value = state
     if opcode == re.sre_parse.LITERAL:
         return strats.just(hunichr(value))
     elif opcode == re.sre_parse.NOT_LITERAL:
         return strats.characters(blacklist_characters=hunichr(value))
     elif opcode == re.sre_parse.AT:
         return strats.just('')
     elif opcode == re.sre_parse.IN:
         if value[0][0] == re.sre_parse.NEGATE:
             candidates = []
             for v in value[1:]:
                 candidates.extend(chain(*(self._handle_character_sets(v))))
             return strats.characters(blacklist_characters=candidates)
         else:
             candidates = []
             for v in value:
                 candidates.extend(chain(*(self._handle_character_sets(v))))
             return strats.sampled_from(candidates)
     elif opcode == re.sre_parse.ANY:
         return strats.characters()
     elif opcode == re.sre_parse.BRANCH:
         branches = []
         for val in value[1]:
             branch = [self._handle_state(v) for v in val]
             branches.append(strategy_concat(branch))
         return strats.one_of(branches)
     elif opcode == re.sre_parse.SUBPATTERN:
         parts = []
         for part in value[1]:
             parts.append(self._handle_state(part))
         result = strategy_concat(parts)
         if value[0]:
             self.cache[value[0]] = result
             result = strats.shared(result, key=value[0])
         return result
     elif opcode == re.sre_parse.ASSERT:
         result = []
         for part in value[1]:
             result.append(self._handle_state(part))
         return strategy_concat(result)
     elif opcode == re.sre_parse.ASSERT_NOT:
         return strats.just('')
     elif opcode == re.sre_parse.GROUPREF:
         return strats.shared(self.cache[value], key=value)
     elif opcode == re.sre_parse.MIN_REPEAT:
         start_range, end_range, val = value
         result = []
         for v in val:
             part = strats.lists(
                 self._handle_state(v),
                 min_size=start_range,
                 max_size=end_range).map(lambda x: u"".join(x))
             result.append(part)
         return strategy_concat(result)
     elif opcode == re.sre_parse.MAX_REPEAT:
         start_range, end_range, val = value
         result = []
         for v in val:
             part = strats.lists(
                 self._handle_state(v),
                 min_size=start_range,
                 max_size=end_range).map(lambda x: u"".join(x))
             result.append(part)
         return strats.tuples(*result).map(lambda x: u"".join(x))
     else:
         import ipdb
         ipdb.set_trace()
         raise NotImplementedError(opcode)

Example #34

unsized_list_raw_strats = [
    (type_str + "[]", st.lists(type_strat, min_size=0, max_size=MAX_LIST_SIZE).map(tuple))
    for type_str, type_strat in all_basic_raw_strats
]
unsized_list_strats = [
    st.tuples(st.just(type_str), type_strat)
    for type_str, type_strat in unsized_list_raw_strats
]


sized_list_strats = [
    st.tuples(
        st.shared(
            st.integers(min_value=MIN_LIST_SIZE, max_value=MAX_LIST_SIZE),
            key="n",
        ).map(lambda n: type_str + "[{0}]".format(n)),
        st.shared(
            st.integers(min_value=MIN_LIST_SIZE, max_value=MAX_LIST_SIZE),
            key="n",
        ).flatmap(lambda n: st.lists(type_strat, min_size=n, max_size=n).map(tuple))
    ) for type_str, type_strat in all_basic_raw_strats
]


def zip_types_and_values(types_and_values):
    types, values = zip(*types_and_values)
    return tuple(types), tuple(values)


single_abi_strats = st.one_of(itertools.chain(
Example #35
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER

from __future__ import division, print_function, absolute_import

import hypothesis.strategies as st
from hypothesis import find, given

x = st.shared(st.integers())


@given(x, x)
def test_sharing_is_by_instance_by_default(a, b):
    assert a == b


@given(
    st.shared(st.integers(), key='hi'), st.shared(st.integers(), key='hi'))
def test_different_instances_with_the_same_key_are_shared(a, b):
    assert a == b


def test_different_instances_are_not_shared():
    find(
Example #36
def from_typing_type(thing):
    # We start with special-case support for Union and Tuple - the latter
    # isn't actually a generic type. Then we handle Literal since it doesn't
    # support `isinstance`. Support for Callable may be added to this section
    # later.
    # We then explicitly error on non-Generic types, which don't carry enough
    # information to sensibly resolve to strategies at runtime.
    # Finally, we run a variation of the subclass lookup in st.from_type
    # among generic types in the lookup.
    import typing

    # Under 3.6 Union is handled directly in st.from_type, as the argument is
    # not an instance of `type`. However, under Python 3.5 Union *is* a type
    # and we have to handle it here, including failing if it has no parameters.
    if hasattr(thing, "__union_params__"):  # pragma: no cover
        args = sorted(thing.__union_params__ or (), key=type_sorting_key)
        if not args:
            raise ResolutionFailed("Cannot resolve Union of no types.")
        return st.one_of([st.from_type(t) for t in args])
    if getattr(thing, "__origin__", None) == tuple or isinstance(
        thing, getattr(typing, "TupleMeta", ())
    ):
        elem_types = getattr(thing, "__tuple_params__", None) or ()
        elem_types += getattr(thing, "__args__", None) or ()
        if (
            getattr(thing, "__tuple_use_ellipsis__", False)
            or len(elem_types) == 2
            and elem_types[-1] is Ellipsis
        ):
            return st.lists(st.from_type(elem_types[0])).map(tuple)
        elif len(elem_types) == 1 and elem_types[0] == ():
            return st.tuples()  # Empty tuple; see issue #1583
        return st.tuples(*map(st.from_type, elem_types))
    if (
        hasattr(typing, "Final") and getattr(thing, "__origin__", None) == typing.Final
    ):  # pragma: no cover  # new in Python 3.8
        return st.one_of([st.from_type(t) for t in thing.__args__])
    if is_typing_literal(thing):  # pragma: no cover  # new in Python 3.8
        args_dfs_stack = list(thing.__args__)
        literals = []
        while args_dfs_stack:
            arg = args_dfs_stack.pop()
            if is_typing_literal(arg):
                args_dfs_stack.extend(reversed(arg.__args__))
            else:
                literals.append(arg)
        return st.sampled_from(literals)
    if isinstance(thing, typing.TypeVar):
        if getattr(thing, "__bound__", None) is not None:
            strat = unwrap_strategies(st.from_type(thing.__bound__))
            if not isinstance(strat, OneOfStrategy):
                return strat
            # The bound was a union, or we resolved it as a union of subtypes,
            # so we need to unpack the strategy to ensure consistency across uses.
            # This incantation runs a sampled_from over the strategies inferred for
            # each part of the union, wraps that in shared so that we only generate
            # from one type per testcase, and flatmaps that back to instances.
            return st.shared(
                st.sampled_from(strat.original_strategies), key="typevar=%r" % (thing,)
            ).flatmap(lambda s: s)
        if getattr(thing, "__constraints__", None):
            return st.shared(
                st.sampled_from(thing.__constraints__), key="typevar=%r" % (thing,)
            ).flatmap(st.from_type)
        # Constraints may be None or () on various Python versions.
        return st.text()  # An arbitrary type for the typevar
    # Now, confirm that we're dealing with a generic type as we expected
    if not isinstance(thing, typing_root_type):  # pragma: no cover
        raise ResolutionFailed("Cannot resolve %s to a strategy" % (thing,))
    # Parametrised generic types have their __origin__ attribute set to the
    # un-parametrised version, which we need to use in the subclass checks.
    # e.g.:     typing.List[int].__origin__ == typing.List
    mapping = {
        k: v
        for k, v in _global_type_lookup.items()
        if isinstance(k, typing_root_type) and try_issubclass(k, thing)
    }
    if typing.Dict in mapping:
        # The subtype relationships between generic and concrete View types
        # are sometimes inconsistent under Python 3.5, so we pop them out to
        # preserve our invariant that all examples of from_type(T) are
        # instances of type T - and simplify the strategy for abstract types
        # such as Container
        for t in (typing.KeysView, typing.ValuesView, typing.ItemsView):
            mapping.pop(t, None)
    strategies = [
        v if isinstance(v, st.SearchStrategy) else v(thing)
        for k, v in mapping.items()
        if sum(try_issubclass(k, T) for T in mapping) == 1
    ]
    empty = ", ".join(repr(s) for s in strategies if s.is_empty)
    if empty or not strategies:  # pragma: no cover
        raise ResolutionFailed(
            "Could not resolve %s to a strategy; consider using "
            "register_type_strategy" % (empty or thing,)
        )
    return st.one_of(strategies)
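
The shared/flatmap incantation described in the comments above is worth a standalone illustration. The following is a minimal sketch with hypothetical stand-in strategies and key string: it shows how sampling a *strategy* under st.shared pins one union branch per test case, so repeated uses of the same TypeVar stay type-consistent.

import hypothesis.strategies as st

# Hypothetical stand-ins for the strategies inferred for each branch of a
# union-bounded TypeVar; the key string is likewise illustrative.
branch_strategies = [st.integers(), st.text()]

# Drawing with the same key reuses the same sampled strategy within a single
# test case; flatmapping back to instances then yields values of one type.
consistent = st.shared(
    st.sampled_from(branch_strategies), key="typevar=~T"
).flatmap(lambda s: s)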
Example #37
def from_typing_type(thing):
    # We start with special-case support for Union and Tuple - the latter
    # isn't actually a generic type.  Support for Callable may be added to
    # this section later.
    # We then explicitly error on non-Generic types, which don't carry enough
    # information to sensibly resolve to strategies at runtime.
    # Finally, we run a variation of the subclass lookup in st.from_type
    # among generic types in the lookup.
    import typing
    # Under 3.6 Union is handled directly in st.from_type, as the argument is
    # not an instance of `type`. However, under Python 3.5 Union *is* a type
    # and we have to handle it here, including failing if it has no parameters.
    if hasattr(thing, '__union_params__'):  # pragma: no cover
        args = sorted(thing.__union_params__ or (), key=type_sorting_key)
        if not args:
            raise ResolutionFailed('Cannot resolve Union of no types.')
        return st.one_of([st.from_type(t) for t in args])
    if getattr(thing, '__origin__', None) == tuple or \
            isinstance(thing, getattr(typing, 'TupleMeta', ())):
        elem_types = getattr(thing, '__tuple_params__', None) or ()
        elem_types += getattr(thing, '__args__', None) or ()
        if getattr(thing, '__tuple_use_ellipsis__', False) or \
                len(elem_types) == 2 and elem_types[-1] is Ellipsis:
            return st.lists(st.from_type(elem_types[0])).map(tuple)
        elif len(elem_types) == 1 and elem_types[0] == ():
            return st.tuples()  # Empty tuple; see issue #1583
        return st.tuples(*map(st.from_type, elem_types))
    if isinstance(thing, typing.TypeVar):
        if getattr(thing, '__bound__', None) is not None:
            return st.from_type(thing.__bound__)
        if getattr(thing, '__constraints__', None):
            return st.shared(
                st.sampled_from(thing.__constraints__),
                key='typevar-with-constraint'
            ).flatmap(st.from_type)
        # Constraints may be None or () on various Python versions.
        return st.text()  # An arbitrary type for the typevar
    # Now, confirm that we're dealing with a generic type as we expected
    if not isinstance(thing, typing_root_type):  # pragma: no cover
        raise ResolutionFailed('Cannot resolve %s to a strategy' % (thing,))
    # Parametrised generic types have their __origin__ attribute set to the
    # un-parametrised version, which we need to use in the subclass checks.
    # e.g.:     typing.List[int].__origin__ == typing.List
    mapping = {k: v for k, v in _global_type_lookup.items()
               if isinstance(k, typing_root_type) and try_issubclass(k, thing)}
    if typing.Dict in mapping:
        # The subtype relationships between generic and concrete View types
        # are sometimes inconsistent under Python 3.5, so we pop them out to
        # preserve our invariant that all examples of from_type(T) are
        # instances of type T - and simplify the strategy for abstract types
        # such as Container
        for t in (typing.KeysView, typing.ValuesView, typing.ItemsView):
            mapping.pop(t, None)
    strategies = [v if isinstance(v, st.SearchStrategy) else v(thing)
                  for k, v in mapping.items()
                  if sum(try_issubclass(k, T) for T in mapping) == 1]
    empty = ', '.join(repr(s) for s in strategies if s.is_empty)
    if empty or not strategies:  # pragma: no cover
        raise ResolutionFailed(
            'Could not resolve %s to a strategy; consider using '
            'register_type_strategy' % (empty or thing,))
    return st.one_of(strategies)
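
Note that this older version shares the sampled constraint under a single fixed key, 'typevar-with-constraint', across all constrained TypeVars in a test case, whereas the newer version above keys on the TypeVar itself. Either way, repeated uses of one constrained TypeVar resolve to the same type per test case, as this small property sketch (assuming only the typing support shown above) demonstrates:

import typing

import hypothesis.strategies as st
from hypothesis import given

# typing.AnyStr is constrained to str or bytes; both arguments below go
# through the shared sampled_from branch, so they agree on one constraint.
@given(st.from_type(typing.AnyStr), st.from_type(typing.AnyStr))
def test_anystr_draws_share_a_type(a, b):
    assert type(a) is type(b)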
Example #38
    def collect_symbols(cls_name, cls_dict):
        domain_range = cls_dict["domain_range"]
        domain_strategy = cls_dict["domain_strategy"] = hyp_st.shared(
            ndarray_shape_st(domain_range), key=cls_name)

        generation_strategy_factories = cls_dict[
            "generation_strategy_factories"] = dict()
        implementation_strategy_factories = cls_dict[
            "implementation_strategy_factories"] = dict()
        global_boundaries = cls_dict["global_boundaries"] = dict()
        constants = cls_dict["constants"] = dict()
        singletons = cls_dict["singletons"] = dict()
        cls_dict["field_params"] = field_params = {}
        max_boundary = ((0, 0), (0, 0), (0, 0))
        for symbol in cls_dict["symbols"].values():
            if symbol.kind == SymbolKind.FIELD:
                max_boundary = tuple(
                    (max(m[0], b[0]), max(m[1], b[1]))
                    for m, b in zip(max_boundary, symbol.boundary))
        cls_dict["max_boundary"] = max_boundary

        for name, symbol in cls_dict["symbols"].items():
            if symbol.kind == SymbolKind.GLOBAL_STRATEGY:
                generation_strategy_factories[name] = symbol.value_st_factory
            elif symbol.kind == SymbolKind.GLOBAL_SET:
                constants[name] = symbol.values
            elif symbol.kind == SymbolKind.SINGLETON:
                singletons[name] = symbol.values[0]
            elif symbol.kind == SymbolKind.FIELD:
                if symbol.axes:
                    axes = symbol.axes
                    extra_shape = tuple(
                        b[0] + b[1] if ax in symbol.axes else None
                        for b, ax in zip(max_boundary, "IJK"))
                else:
                    axes = "IJK"
                    extra_shape = tuple(b[0] + b[1] for b in max_boundary)

                if symbol.data_dims:
                    data_dims = symbol.data_dims
                    extra_shape = (*extra_shape, *symbol.data_dims)
                else:
                    data_dims = tuple()

                field_params[name] = (axes, data_dims)

                global_boundaries[name] = symbol.boundary
                shape_strategy = derived_shape_st(domain_strategy, extra_shape)

                # Use default arguments to pass values avoiding late binding problems
                def implementation_strategy_factory(
                        dt,
                        shape=shape_strategy,
                        value_st_factory=symbol.value_st_factory):
                    return ndarray_st(dt, shape, value_st_factory)

                implementation_strategy_factories[
                    name] = implementation_strategy_factory
            elif symbol.kind == SymbolKind.PARAMETER:
                implementation_strategy_factories[
                    name] = symbol.value_st_factory
            elif symbol.kind == SymbolKind.NONE:
                implementation_strategy_factories[
                    name] = symbol.value_st_factory

            else:
                raise AssertionError

        cls_dict["origin"] = tuple(o[0] for o in max_boundary)
Example #39
from __future__ import division, print_function, absolute_import

import hypothesis.strategies as st
from hypothesis import given
from tests.common.debug import minimal, find_any

x = st.shared(st.integers())


@given(x, x)
def test_sharing_is_by_instance_by_default(a, b):
    assert a == b


@given(
    st.shared(st.integers(), key='hi'), st.shared(st.integers(), key='hi'))
def test_different_instances_with_the_same_key_are_shared(a, b):
    assert a == b


def test_different_instances_are_not_shared():
    find_any(
        st.tuples(st.shared(st.integers()), st.shared(st.integers())),
        lambda x: x[0] != x[1]
    )
Example #40
from __future__ import division, print_function, absolute_import

import hypothesis.strategies as st
from hypothesis import find, given

x = st.shared(st.integers())


@given(x, x)
def test_sharing_is_by_instance_by_default(a, b):
    assert a == b


@given(st.shared(st.integers(), key='hi'), st.shared(st.integers(), key='hi'))
def test_different_instances_with_the_same_key_are_shared(a, b):
    assert a == b


def test_different_instances_are_not_shared():
    find(st.tuples(st.shared(st.integers()), st.shared(st.integers())),
         lambda x: x[0] != x[1])
Example #41
def test_different_keys_are_not_shared():
    find(
        st.tuples(st.shared(st.integers(), key=1),
                  st.shared(st.integers(), key=2)), lambda x: x[0] != x[1])
Example #42
def test_keys_and_default_are_not_shared():
    find(st.tuples(st.shared(st.integers(), key=1), st.shared(st.integers())),
         lambda x: x[0] != x[1])
Example #43
def test_can_simplify_shared_lists():
    xs = minimal(
        st.lists(st.shared(st.integers())),
        lambda x: len(x) >= 10 and x[0] != 0
    )
    assert xs == [1] * 10
Example #44
def test_can_simplify_shared_lists():
    xs = find(st.lists(st.shared(st.integers())),
              lambda x: len(x) >= 10 and x[0] != 0)
    assert xs == [1] * 10
Example #45
import hypothesis.strategies as st


@st.composite
def distinct_integers(draw):
    # The shared set persists across draws in one test case, keeping values unique.
    used = draw(st.shared(st.builds(set), key='distinct_integers.used'))
    i = draw(st.integers(0, 2 ** 64 - 1).filter(lambda x: x not in used))
    used.add(i)
    return i
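
A quick usage sketch (the test name is illustrative): because every draw consults the same shared set, a list built from this strategy contains no duplicates within a single test case.

from hypothesis import given
import hypothesis.strategies as st

@given(st.lists(distinct_integers(), min_size=5, max_size=5))
def test_draws_are_distinct(xs):
    # Five draws, one shared 'used' set, hence five distinct integers.
    assert len(set(xs)) == 5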
Example #46
import numpy as np
import pandas as pd
import pytest

import hypothesis.strategies as st
import hypothesis.extra.numpy as st_np
from hypothesis import given

import kernel_fca_oo as krn


@pytest.fixture(scope='function')
def kernel_classes():
    return [krn.KernelSystemNP, krn.KernelSystemDF, krn.FCASystemDF]


arr_shape_strat = st.tuples(st.integers(min_value=2, max_value=10), 
                            st.integers(min_value=2, max_value=10))
bin_int_strat = st.integers(min_value=0, max_value=1)
arr_strat = arr_shape_strat.flatmap(lambda t: st_np.arrays(np.int8, (t[0], t[1]), 
                                                           elements=bin_int_strat))
intent_strat = st.shared(arr_strat, key=1).flatmap(lambda a: st.sets(st.integers(0, a.shape[1]-1)))


@given(st.shared(arr_strat, key=1), intent_strat)
def test_ei(kernel_classes, arr, gen_intent):
    df = pd.DataFrame(arr)
    for K in kernel_classes:
        if K == krn.KernelSystemNP:
            ks = K(arr)
        else:
            ks = K(df)
        extent = ks.extent(gen_intent)
        intent = set(ks.intent(extent))
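
The wiring in this last example, a base strategy shared by key plus a dependent strategy flatmapped from the same shared draw, generalises beyond arrays. A minimal sketch of the same pattern with illustrative names:

import hypothesis.strategies as st
from hypothesis import given

# Two st.shared instances with the same key see the same drawn list, so
# indices derived from one are always valid for the other.
base_list = st.lists(st.integers(), min_size=1)
indices = st.shared(base_list, key="base-data").flatmap(
    lambda xs: st.sets(st.integers(0, len(xs) - 1))
)

@given(st.shared(base_list, key="base-data"), indices)
def test_derived_indices_stay_in_range(xs, idxs):
    assert all(0 <= i < len(xs) for i in idxs)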