def test_given_twice_is_same():
    @given(st.data(), st.data())
    def test(data1, data2):
        data1.draw(st.integers())
        data2.draw(st.integers())
        raise ValueError()

    with raises(ValueError):
        with capture_out() as out:
            with reporting.with_reporter(reporting.default):
                test()
    result = out.getvalue()
    assert "Draw 1: 0" in result
    assert "Draw 2: 0" in result
def run_state_machine_as_test(state_machine_factory, settings=None):
    """Run a state machine definition as a test, either silently doing nothing
    or printing a minimal breaking program and raising an exception.

    state_machine_factory is anything which returns an instance of
    GenericStateMachine when called with no arguments - it can be a class or a
    function. settings will be used to control the execution of the test.
    """
    if settings is None:
        try:
            settings = state_machine_factory.TestCase.settings
            check_type(Settings, settings, "state_machine_factory.TestCase.settings")
        except AttributeError:
            settings = Settings(deadline=None, suppress_health_check=HealthCheck.all())
    check_type(Settings, settings, "settings")

    @settings
    @given(st.data())
    def run_state_machine(factory, data):
        machine = factory()
        check_type(GenericStateMachine, machine, "state_machine_factory()")
        data.conjecture_data.hypothesis_runner = machine

        n_steps = settings.stateful_step_count
        should_continue = cu.many(
            data.conjecture_data, min_size=1, max_size=n_steps, average_size=n_steps
        )

        print_steps = (
            current_build_context().is_final or current_verbosity() >= Verbosity.debug
        )
        try:
            if print_steps:
                machine.print_start()
            machine.check_invariants()

            while should_continue.more():
                value = data.conjecture_data.draw(machine.steps())
                if print_steps:
                    machine.print_step(value)
                machine.execute_step(value)
                machine.check_invariants()
        finally:
            if print_steps:
                machine.print_end()
            machine.teardown()

    # Use a machine digest to identify stateful tests in the example database
    run_state_machine.hypothesis.inner_test._hypothesis_internal_add_digest = function_digest(
        state_machine_factory
    )
    # Copy some attributes so @seed and @reproduce_failure "just work"
    run_state_machine._hypothesis_internal_use_seed = getattr(
        state_machine_factory, "_hypothesis_internal_use_seed", None
    )
    run_state_machine._hypothesis_internal_use_reproduce_failure = getattr(
        state_machine_factory, "_hypothesis_internal_use_reproduce_failure", None
    )

    run_state_machine(state_machine_factory)
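
A minimal usage sketch for the runner above, assuming hypothesis's old-style
GenericStateMachine API and the `st` alias already in scope; CounterMachine is
a hypothetical example machine:

class CounterMachine(GenericStateMachine):
    """A counter that should never go negative."""

    def __init__(self):
        self.count = 0

    def steps(self):
        # Each step is a single increment or decrement, drawn as a strategy.
        return st.sampled_from([1, -1])

    def execute_step(self, step):
        self.count = max(0, self.count + step)

    def check_invariants(self):
        assert self.count >= 0


run_state_machine_as_test(CounterMachine)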
def test_slow_generation_inline_fails_a_health_check():
    @settings(deadline=None)
    @given(st.data())
    def test(data):
        data.draw(st.integers().map(lambda x: time.sleep(0.2)))

    with raises(FailedHealthCheck):
        test()
def test_does_print_reproduction_for_simple_data_examples_by_default():
    @given(st.data())
    def test(data):
        data.draw(st.integers())
        assert False

    with capture_out() as o:
        with pytest.raises(AssertionError):
            test()
    assert '@reproduce_failure' in o.getvalue()
def test_does_not_print_reproduction_if_verbosity_set_to_quiet():
    @given(st.data())
    @settings(verbosity=Verbosity.quiet)
    def test_always_fails(data):
        assert data.draw(st.just(False))

    with capture_out() as out:
        with pytest.raises(AssertionError):
            test_always_fails()

    assert '@reproduce_failure' not in out.getvalue()
def test_does_not_print_reproduction_for_large_data_examples_by_default():
    @settings(phases=no_shrink)
    @given(st.data())
    def test(data):
        b = data.draw(st.binary(min_size=1000, max_size=1000))
        if len(zlib.compress(b)) > 1000:
            raise ValueError()

    with capture_out() as o:
        with pytest.raises(ValueError):
            test()
    assert '@reproduce_failure' not in o.getvalue()
def test_error_is_in_finally():
    @given(st.data())
    def test(d):
        try:
            d.draw(st.lists(st.integers(), min_size=3, unique=True))
        finally:
            raise ValueError()

    with capture_out() as o:
        with pytest.raises(ValueError):
            test()

    assert "[0, 1, -1]" in o.getvalue()
def test_prints_labels_if_given_on_failure():
    @given(st.data())
    def test(data):
        x = data.draw(st.lists(st.integers(0, 10), min_size=2), label="Some numbers")
        y = data.draw(st.sampled_from(x), label="A number")
        assert y in x
        x.remove(y)
        assert y not in x

    with raises(AssertionError):
        with capture_out() as out:
            with reporting.with_reporter(reporting.default):
                test()
    result = out.getvalue()
    assert "Draw 1 (Some numbers): [0, 0]" in result
    assert "Draw 2 (A number): 0" in result
def test_prints_on_failure():
    @given(st.data())
    def test(data):
        x = data.draw(st.lists(st.integers(), min_size=1))
        y = data.draw(st.sampled_from(x))
        assert y in x
        x.remove(y)
        assert y not in x

    with raises(AssertionError):
        with capture_out() as out:
            with reporting.with_reporter(reporting.default):
                test()
    result = out.getvalue()
    assert 'Draw 1: [0, 0]' in result
    assert 'Draw 2: 0' in result
def test_prints_on_failure():
    @given(st.data())
    def test(data):
        x = data.draw(st.lists(st.integers(0, 10), min_size=2))
        y = data.draw(st.sampled_from(x))
        x.remove(y)
        if y in x:
            raise ValueError()

    with raises(ValueError):
        with capture_out() as out:
            with reporting.with_reporter(reporting.default):
                test()
    result = out.getvalue()
    assert "Draw 1: [0, 0]" in result
    assert "Draw 2: 0" in result
def test_should_only_fail_a_deadline_if_the_test_is_slow(slow_strategy, slow_test):
    s = st.integers()
    if slow_strategy:
        s = s.map(lambda x: time.sleep(0.08))

    @settings(deadline=50)
    @given(st.data())
    def test(data):
        data.draw(s)
        if slow_test:
            time.sleep(0.1)

    if slow_test:
        with pytest.raises(DeadlineExceeded):
            test()
    else:
        test()
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER

from __future__ import absolute_import, division, print_function

import pytest

from hypothesis import find, given, reporting, strategies as st
from hypothesis.errors import InvalidArgument
from tests.common.utils import capture_out, raises


@given(st.integers(), st.data())
def test_conditional_draw(x, data):
    y = data.draw(st.integers(min_value=x))
    assert y >= x


Example #13
#     lengths = torch.tensor(
#         [data.draw(integers(min_value=2, max_value=N)) for b in range(batch - 1)] + [N]
#     )
#     marginals2 = model().marginals(vals, lengths=lengths, _autograd=True)
#     v, _, alpha = model()._dp(vals, lengths=lengths)
#     marginals = model()._dp_backward(vals, lengths, alpha, v)

#     if isinstance(marginals, tuple):
#         for i, (m1, m2) in enumerate(zip(marginals[:], marginals2[:])):
#             assert torch.isclose(m1, m2).all(), (not torch.isclose(m1, m2)).nonzero()
#     else:
#         assert torch.isclose(marginals, marginals2).all()


@given(data())
def test_entropy(data):
    model = data.draw(sampled_from([LinearChain, SemiMarkov]))
    semiring = EntropySemiring
    struct = model(semiring)
    vals, (batch, N) = model._rand()
    alpha = struct.sum(vals)

    log_z = model(LogSemiring).sum(vals)
    _, log_probs = model(LogSemiring).enumerate(vals)
    log_probs = torch.stack(log_probs, dim=1) - log_z
    print(log_probs.shape, log_z.shape, log_probs.exp().sum(1))
    entropy = -log_probs.mul(log_probs.exp()).sum(1).squeeze(0)
    assert entropy.shape == alpha.shape
    assert torch.isclose(entropy, alpha).all()
Example #14
def test_channel_is_on_on(instrument):
    for channel_id in instrument.channel_ids:
        channel = instrument.channel(channel_id)
        channel.is_on = True
        assert channel.is_on


def test_channel_is_on_off(instrument):
    for channel_id in instrument.channel_ids:
        channel = instrument.channel(channel_id)
        channel.is_on = False
        assert not channel.is_on


@given(data=data())
def test_set_voltage_setpoint_level(instrument, data):
    channel_id = data.draw(sampled_from(instrument.channel_ids))
    channel = instrument.channel(channel_id)
    voltage = data.draw(
        floats(channel.voltage.protection.min, channel.voltage.protection.max).map(
            lambda v: round(v, 3)))
    channel.voltage.setpoint.level = voltage
    assert channel.voltage.setpoint.level == voltage


@given(data=data())
def test_set_voltage_setpoint_step_increment(instrument, data):
    channel_id = data.draw(sampled_from(instrument.channel_ids))
    channel = instrument.channel(channel_id)
    increment = data.draw(
Example #15

@given(simple_classes())
def test_structure_simple_from_dict(converter, cl_and_vals):
    # type: (Converter, Any) -> None
    """Test structuring non-nested attrs classes dumped with asdict."""
    cl, vals = cl_and_vals
    obj = cl(*vals)

    dumped = asdict(obj)
    loaded = converter.structure(dumped, cl)

    assert obj == loaded


@given(simple_classes(defaults=True, min_attrs=1, frozen=False), data())
def test_structure_simple_from_dict_default(converter, cl_and_vals, data):
    """Test structuring non-nested attrs classes with default value."""
    cl, vals = cl_and_vals
    obj = cl(*vals)
    attrs_with_defaults = [
        a for a in fields(cl)
        if (a.default is not MISSING) or (a.default_factory is not MISSING)
    ]
    to_remove = data.draw(
        lists(elements=sampled_from(attrs_with_defaults), unique=True))

    for a in to_remove:
        if a.default is not MISSING:
            setattr(obj, a.name, a.default)
        elif a.default_factory is not MISSING:
            setattr(obj, a.name, a.default_factory())
Example #16
class BijectorPropertiesTest(test_util.TestCase):

  def _draw_bijector(self, bijector_name, data,
                     batch_shape=None, allowed_bijectors=None,
                     validate_args=True):
    event_dim = data.draw(hps.integers(min_value=2, max_value=6))
    bijector = data.draw(
        bijectors(bijector_name=bijector_name, event_dim=event_dim,
                  enable_vars=True, batch_shape=batch_shape,
                  allowed_bijectors=allowed_bijectors,
                  validate_args=validate_args))
    self.evaluate(tf.group(*[v.initializer for v in bijector.variables]))
    return bijector, event_dim

  def _draw_domain_tensor(self, bijector, data, event_dim, sample_shape=()):
    # TODO(axch): Would be nice to get rid of all this shape inference logic and
    # just rely on a notion of batch and event shape for bijectors, so we can
    # pass those through `domain_tensors` and `codomain_tensors` and use
    # `tensors_in_support`.  However, `RationalQuadraticSpline` behaves weirdly
    # somehow and I got confused.
    codomain_event_shape = [event_dim] * bijector.inverse_min_event_ndims
    codomain_event_shape = constrain_inverse_shape(
        bijector, codomain_event_shape)
    shp = bijector.inverse_event_shape(codomain_event_shape)
    shp = functools.reduce(tensorshape_util.concatenate, [
        sample_shape,
        data.draw(
            tfp_hps.broadcast_compatible_shape(
                shp[:shp.ndims - bijector.forward_min_event_ndims])),
        shp[shp.ndims - bijector.forward_min_event_ndims:]])
    xs = tf.identity(data.draw(domain_tensors(bijector, shape=shp)), name='xs')

    return xs

  def _draw_codomain_tensor(self, bijector, data, event_dim, sample_shape=()):
    return self._draw_domain_tensor(tfb.Invert(bijector),
                                    data=data,
                                    event_dim=event_dim,
                                    sample_shape=sample_shape)

  @parameterized.named_parameters(
      {'testcase_name': bname, 'bijector_name': bname}
      for bname in TF2_FRIENDLY_BIJECTORS)
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testBijector(self, bijector_name, data):
    tfp_hps.guitar_skip_if_matches('Tanh', bijector_name, 'b/144163991')

    bijector, event_dim = self._draw_bijector(bijector_name, data)

    # Forward mapping: Check differentiation through forward mapping with
    # respect to the input and parameter variables.  Also check that any
    # variables are not referenced overmuch.
    xs = self._draw_domain_tensor(bijector, data, event_dim)
    wrt_vars = [xs] + [v for v in bijector.trainable_variables
                       if v.dtype.is_floating]
    with tf.GradientTape() as tape:
      with tfp_hps.assert_no_excessive_var_usage(
          'method `forward` of {}'.format(bijector)):
        tape.watch(wrt_vars)
        # TODO(b/73073515): Fix graph mode gradients with bijector caching.
        ys = bijector.forward(xs + 0)
    grads = tape.gradient(ys, wrt_vars)
    assert_no_none_grad(bijector, 'forward', wrt_vars, grads)

    # For scalar bijectors, verify correctness of the _is_increasing method.
    # TODO(b/148459057): Except, don't verify Softfloor on Guitar because
    # of numerical problem.
    def exception(bijector):
      if not tfp_hps.running_under_guitar():
        return False
      if isinstance(bijector, tfb.Softfloor):
        return True
      if isinstance(bijector, tfb.Invert):
        return exception(bijector.bijector)
      return False
    if (bijector.forward_min_event_ndims == 0 and
        bijector.inverse_min_event_ndims == 0 and
        not exception(bijector)):
      dydx = grads[0]
      hp.note('dydx: {}'.format(dydx))
      isfinite = tf.math.is_finite(dydx)
      incr_or_slope_eq0 = bijector._internal_is_increasing() | tf.equal(dydx, 0)  # pylint: disable=protected-access
      self.assertAllEqual(
          isfinite & incr_or_slope_eq0,
          isfinite & (dydx >= 0) | tf.zeros_like(incr_or_slope_eq0))

    # FLDJ: Check differentiation through forward log det jacobian with
    # respect to the input and parameter variables.  Also check that any
    # variables are not referenced overmuch.
    event_ndims = data.draw(
        hps.integers(
            min_value=bijector.forward_min_event_ndims,
            max_value=xs.shape.ndims))
    with tf.GradientTape() as tape:
      max_permitted = _ldj_tensor_conversions_allowed(bijector, is_forward=True)
      with tfp_hps.assert_no_excessive_var_usage(
          'method `forward_log_det_jacobian` of {}'.format(bijector),
          max_permissible=max_permitted):
        tape.watch(wrt_vars)
        # TODO(b/73073515): Fix graph mode gradients with bijector caching.
        ldj = bijector.forward_log_det_jacobian(xs + 0, event_ndims=event_ndims)
    grads = tape.gradient(ldj, wrt_vars)
    assert_no_none_grad(bijector, 'forward_log_det_jacobian', wrt_vars, grads)

    # Inverse mapping: Check differentiation through inverse mapping with
    # respect to the codomain "input" and parameter variables.  Also check that
    # any variables are not referenced overmuch.
    ys = self._draw_codomain_tensor(bijector, data, event_dim)
    wrt_vars = [ys] + [v for v in bijector.trainable_variables
                       if v.dtype.is_floating]
    with tf.GradientTape() as tape:
      with tfp_hps.assert_no_excessive_var_usage(
          'method `inverse` of {}'.format(bijector)):
        tape.watch(wrt_vars)
        # TODO(b/73073515): Fix graph mode gradients with bijector caching.
        xs = bijector.inverse(ys + 0)
    grads = tape.gradient(xs, wrt_vars)
    assert_no_none_grad(bijector, 'inverse', wrt_vars, grads)

    # ILDJ: Check differentiation through inverse log det jacobian with respect
    # to the codomain "input" and parameter variables.  Also check that any
    # variables are not referenced overmuch.
    event_ndims = data.draw(
        hps.integers(
            min_value=bijector.inverse_min_event_ndims,
            max_value=ys.shape.ndims))
    with tf.GradientTape() as tape:
      max_permitted = _ldj_tensor_conversions_allowed(
          bijector, is_forward=False)
      with tfp_hps.assert_no_excessive_var_usage(
          'method `inverse_log_det_jacobian` of {}'.format(bijector),
          max_permissible=max_permitted):
        tape.watch(wrt_vars)
        # TODO(b/73073515): Fix graph mode gradients with bijector caching.
        ldj = bijector.inverse_log_det_jacobian(ys + 0, event_ndims=event_ndims)
    grads = tape.gradient(ldj, wrt_vars)
    assert_no_none_grad(bijector, 'inverse_log_det_jacobian', wrt_vars, grads)

    # Verify that `_is_permutation` implies constant zero Jacobian.
    if bijector._is_permutation:
      self.assertTrue(bijector._is_constant_jacobian)
      self.assertAllEqual(ldj, 0.)

    # Check that the outputs of forward_dtype and inverse_dtype match the dtypes
    # of the outputs of forward and inverse.
    self.assertAllEqualNested(ys.dtype, bijector.forward_dtype(xs.dtype))
    self.assertAllEqualNested(xs.dtype, bijector.inverse_dtype(ys.dtype))

  @parameterized.named_parameters({
      'testcase_name': bname, 'bijector_name': bname
  } for bname in TF2_FRIENDLY_BIJECTORS)
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testParameterProperties(self, bijector_name, data):
    if tf.config.functions_run_eagerly() or not tf.executing_eagerly():
      self.skipTest('To reduce test weight, parameter properties tests run in '
                    'eager mode only.')

    non_trainable_params = (
        'bijector',  # Several.
        'forward_fn',  # Inline.
        'inverse_fn',  # Inline.
        'forward_min_event_ndims',  # Inline.
        'inverse_min_event_ndims',  # Inline.
        'event_shape_out',  # Reshape.
        'event_shape_in',  # Reshape.
        'perm',  # Transpose.
        'rightmost_transposed_ndims',  # Transpose.
        'diag_bijector',  # TransformDiagonal.
        'diag_shift'  # FillScaleTriL (doesn't support batch shape).
    )
    bijector, event_dim = self._draw_bijector(
        bijector_name, data,
        validate_args=True,
        allowed_bijectors=TF2_FRIENDLY_BIJECTORS)

    # Extract the full shape of an output from this bijector.
    xs = self._draw_domain_tensor(bijector, data, event_dim)
    ys = bijector.forward(xs)
    output_shape = prefer_static.shape(ys)
    sample_and_batch_ndims = (prefer_static.rank_from_shape(output_shape) -
                              bijector.inverse_min_event_ndims)

    try:
      params = type(bijector).parameter_properties()
      params64 = type(bijector).parameter_properties(dtype=tf.float64)
    except NotImplementedError as e:
      self.skipTest(str(e))

    seeds = samplers.split_seed(test_util.test_seed(), n=len(params))
    new_parameters = {}
    for i, (param_name, param) in enumerate(params.items()):
      if param_name in non_trainable_params:
        continue

      # Check that the shape_fn is consistent with event_ndims.
      try:
        param_shape = param.shape_fn(sample_shape=output_shape)
      except NotImplementedError:
        self.skipTest('No shape function implemented for bijector {} '
                      'parameter {}.'.format(bijector_name, param_name))
      self.assertGreaterEqual(
          param.event_ndims,
          prefer_static.rank_from_shape(param_shape) - sample_and_batch_ndims)

      if param.is_preferred:
        try:
          param_bijector = param.default_constraining_bijector_fn()
        except NotImplementedError:
          self.skipTest('No constraining bijector implemented for {} '
                        'parameter {}.'.format(bijector_name, param_name))
        unconstrained_shape = (
            param_bijector.inverse_event_shape_tensor(param_shape))
        unconstrained_param = samplers.normal(
            unconstrained_shape, seed=seeds[i])
        new_parameters[param_name] = param_bijector.forward(unconstrained_param)

        # Check that passing a float64 `eps` works with float64 parameters.
        b_float64 = params64[param_name].default_constraining_bijector_fn()
        b_float64(tf.cast(unconstrained_param, tf.float64))

    # Copy over any non-trainable parameters.
    new_parameters.update({
        k: v
        for (k, v) in bijector.parameters.items()
        if k in non_trainable_params
    })

    # Sanity check that we got valid parameters.
    new_parameters['validate_args'] = True
    new_bijector = type(bijector)(**new_parameters)
    self.evaluate(tf.group(*[v.initializer for v in new_bijector.variables]))
    xs = self._draw_domain_tensor(new_bijector, data, event_dim)
    self.evaluate(new_bijector.forward(xs))

  @parameterized.named_parameters(
      {'testcase_name': bname, 'bijector_name': bname}
      for bname in (set(TF2_FRIENDLY_BIJECTORS) -
                    set(AUTOVECTORIZATION_IS_BROKEN)))
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testAutoVectorization(self, bijector_name, data):

    # TODO(b/150161911): reconcile numeric behavior of eager and graph mode.
    if tf.executing_eagerly():
      return

    bijector, event_dim = self._draw_bijector(
        bijector_name, data,
        batch_shape=[],  # Avoid conflict with vmap sample dimension.
        validate_args=False,  # Work around lack of `If` support in vmap.
        allowed_bijectors=(set(TF2_FRIENDLY_BIJECTORS) -
                           set(AUTOVECTORIZATION_IS_BROKEN)))
    atol = AUTOVECTORIZATION_ATOL[bijector_name]
    rtol = AUTOVECTORIZATION_RTOL[bijector_name]

    # Forward
    n = 3
    xs = self._draw_domain_tensor(bijector, data, event_dim, sample_shape=[n])
    ys = bijector.forward(xs)
    vectorized_ys = tf.vectorized_map(bijector.forward, xs,
                                      fallback_to_while_loop=False)
    self.assertAllClose(*self.evaluate((ys, vectorized_ys)),
                        atol=atol, rtol=rtol)

    # FLDJ
    event_ndims = data.draw(
        hps.integers(
            min_value=bijector.forward_min_event_ndims,
            max_value=prefer_static.rank_from_shape(xs.shape) - 1))
    fldj_fn = functools.partial(bijector.forward_log_det_jacobian,
                                event_ndims=event_ndims)
    vectorized_fldj = tf.vectorized_map(fldj_fn, xs,
                                        fallback_to_while_loop=False)
    fldj = tf.broadcast_to(fldj_fn(xs), tf.shape(vectorized_fldj))
    self.assertAllClose(*self.evaluate((fldj, vectorized_fldj)),
                        atol=atol, rtol=rtol)

    # Inverse
    ys = self._draw_codomain_tensor(bijector, data, event_dim, sample_shape=[n])
    xs = bijector.inverse(ys)
    vectorized_xs = tf.vectorized_map(bijector.inverse, ys,
                                      fallback_to_while_loop=False)
    self.assertAllClose(*self.evaluate((xs, vectorized_xs)),
                        atol=atol, rtol=rtol)

    # ILDJ
    event_ndims = data.draw(
        hps.integers(
            min_value=bijector.inverse_min_event_ndims,
            max_value=prefer_static.rank_from_shape(ys.shape) - 1))
    ildj_fn = functools.partial(bijector.inverse_log_det_jacobian,
                                event_ndims=event_ndims)
    vectorized_ildj = tf.vectorized_map(ildj_fn, ys,
                                        fallback_to_while_loop=False)
    ildj = tf.broadcast_to(ildj_fn(ys), tf.shape(vectorized_ildj))
    self.assertAllClose(*self.evaluate((ildj, vectorized_ildj)),
                        atol=atol, rtol=rtol)

  @parameterized.named_parameters(
      {'testcase_name': bname, 'bijector_name': bname}
      for bname in TF2_FRIENDLY_BIJECTORS)
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testHashing(self, bijector_name, data):
    bijector_1, bijector_2 = data.draw(
        bijectors(bijector_name=bijector_name,
                  enable_vars=True, return_duplicate=True))
    self.assertEqual(hash(bijector_1), hash(bijector_2))

  @parameterized.named_parameters(
      {'testcase_name': bname, 'bijector_name': bname}
      for bname in TF2_FRIENDLY_BIJECTORS)
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testEquality(self, bijector_name, data):
    bijector_1, bijector_2 = data.draw(
        bijectors(bijector_name=bijector_name,
                  enable_vars=True, return_duplicate=True))
    self.assertEqual(bijector_1, bijector_2)
    self.assertFalse(bijector_1 != bijector_2)  # pylint: disable=g-generic-assert

  @parameterized.named_parameters(
      {'testcase_name': bname, 'bijector_name': bname}
      for bname in (set(TF2_FRIENDLY_BIJECTORS) -
                    set(COMPOSITE_TENSOR_IS_BROKEN)))
  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testCompositeTensor(self, bijector_name, data):

    bijector, event_dim = self._draw_bijector(
        bijector_name, data,
        batch_shape=[],
        validate_args=True,
        allowed_bijectors=(set(TF2_FRIENDLY_BIJECTORS) -
                           set(COMPOSITE_TENSOR_IS_BROKEN)))

    # TODO(b/182603117): Remove "if" condition and s/composite_bij/bijector
    # when AutoCT is enabled for meta-bijectors and LinearOperator.
    if type(bijector).__name__ in AUTO_COMPOSITE_TENSOR_IS_BROKEN:
      composite_bij = experimental.as_composite(bijector)
    else:
      composite_bij = bijector

    if not tf.executing_eagerly():
      composite_bij = tf.nest.map_structure(
          lambda x: (tf.convert_to_tensor(x)  # pylint: disable=g-long-lambda
                     if isinstance(x, DeferredTensor) else x),
          composite_bij,
          expand_composites=True)

    self.assertIsInstance(composite_bij, tf.__internal__.CompositeTensor)
    flat = tf.nest.flatten(composite_bij, expand_composites=True)
    unflat = tf.nest.pack_sequence_as(
        composite_bij, flat, expand_composites=True)

    # Compare forward maps before and after compositing.
    n = 3
    xs = self._draw_domain_tensor(bijector, data, event_dim, sample_shape=[n])
    before_ys = bijector.forward(xs)
    after_ys = unflat.forward(xs)
    self.assertAllClose(*self.evaluate((before_ys, after_ys)))

    # Compare inverse maps before and after compositing.
    ys = self._draw_codomain_tensor(bijector, data, event_dim, sample_shape=[n])
    before_xs = bijector.inverse(ys)
    after_xs = unflat.inverse(ys)
    self.assertAllClose(*self.evaluate((before_xs, after_xs)))

    # Input to tf.function
    self.assertAllClose(
        before_ys,
        tf.function(lambda b: b.forward(xs))(composite_bij),
        rtol=COMPOSITE_TENSOR_RTOL[bijector_name],
        atol=COMPOSITE_TENSOR_ATOL[bijector_name])

    # Forward mapping: Check differentiation through forward mapping with
    # respect to the input and parameter variables.  Also check that any
    # variables are not referenced overmuch.
    xs = self._draw_domain_tensor(bijector, data, event_dim)
    wrt_vars = [xs] + [v for v in composite_bij.trainable_variables
                       if v.dtype.is_floating]
    with tf.GradientTape() as tape:
      tape.watch(wrt_vars)
      # TODO(b/73073515): Fix graph mode gradients with bijector caching.
      ys = bijector.forward(xs + 0)
    grads = tape.gradient(ys, wrt_vars)
    assert_no_none_grad(bijector, 'forward', wrt_vars, grads)
Example #17
import funcy as fn
import hypothesis.strategies as st
import pytest
from hypothesis import given
from bidict import bidict

import aiger
from aiger import hypothesis as aigh
from aiger import common


@given(st.integers(2, 10), st.data())
def test_and(n_inputs, data):
    aag = common.and_gate([f'x{i}' for i in range(n_inputs)], "out")
    test_input = {f'x{i}': data.draw(st.booleans()) for i in range(n_inputs)}
    out, _ = aag(test_input)
    assert out['out'] == all(test_input.values())


@given(aigh.Circuits, aigh.Circuits, st.data())
def test_and2(aag1, aag2, data):
    aag3 = aag1 | aag2
    aag3 >>= common.and_gate(aag3.outputs)

    test_input = {f'{i}': data.draw(st.booleans()) for i in aag3.inputs}

    out1, _ = aag1(test_input)
    out2, _ = aag2(test_input)
    out3, _ = aag3(test_input)

    v12 = list(out1.values())[0] and list(out2.values())[0]
Example #18
def vec_reduce(test_func, strat, arith_func, type):
    return pytest.mark.parametrize('strat,func,type', [
        (strat, arith_func, type)
    ])(given(data=st.data())(test_func))
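
A hypothetical usage sketch for the helper above: `vec_reduce` parametrizes a
raw test function with a single (strategy, function, type) triple and injects
Hypothesis's `data` argument, so the wrapped function receives
(strat, func, type, data). All names below are illustrative:

def check_sum(strat, func, type, data):
    # Draw a concrete input from the parametrized strategy and check that
    # the reduction produces the expected type.
    values = data.draw(strat)
    assert isinstance(func(values), type)


test_sum_is_int = vec_reduce(check_sum, st.lists(st.integers()), sum, int)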
Example #19
    ks = np.empty(10, dtype=np.int32)
    vs = np.empty(10)

    # insert an item
    n = kvp_minheap_insert(0, 0, 10, 5, 3.0, ks, vs)
    n = kvp_minheap_insert(0, n, 10, 1, 1.0, ks, vs)

    # ep has moved
    assert n == 2

    # data is there
    assert all(ks[:2] == [1, 5])
    assert all(vs[:2] == [1.0, 3.0])


@given(st.integers(10, 100), st.data())
def test_kvp_add_several(kvp_len, data):
    "Test filling up a KVP."
    ks = np.full(kvp_len, -1, dtype=np.int32)
    vs = np.zeros(kvp_len)

    n = 0

    values = st.floats(-100, 100)

    for k in range(kvp_len):
        v = data.draw(values)
        assume(v not in vs[:n])  # we can't keep drawing the same value
        n = kvp_minheap_insert(0, n, kvp_len, k, v, ks, vs)

    assert n == kvp_len
Example #20

class Foo:
    pass


foos = st.tuples().map(lambda _: Foo())


def test_can_create_arrays_of_composite_types():
    arr = minimal(nps.arrays(object, 100, foos))
    for x in arr:
        assert isinstance(x, Foo)


@given(st.lists(st.integers()), st.data())
def test_can_create_zero_dim_arrays_of_lists(x, data):
    arr = data.draw(nps.arrays(object, (), elements=st.just(x)))
    assert arr.shape == ()
    assert arr.dtype == np.dtype(object)
    assert arr.item() == x


def test_can_create_arrays_of_tuples():
    arr = minimal(
        nps.arrays(object, 10, st.tuples(st.integers(), st.integers())),
        lambda x: all(t0 != t1 for t0, t1 in x),
    )
    assert all(a in ((1, 0), (0, 1)) for a in arr)

Example #21
from typing import List

import hypothesis.strategies as st
import numpy as np
from hypothesis import given, note

from tests.custom_strategies import (
    adv_integer_index,
    basic_indices,
    choices,
    integer_index,
    slice_index,
    valid_axes,
)


@given(seq=st.lists(elements=st.integers()),
       replace=st.booleans(),
       data=st.data())
def test_choices(seq: List[int], replace: bool, data: st.DataObject):
    """ Ensures that the `choices` strategy:
        - draws from the provided sequence
        - respects input parameters"""
    upper = len(seq) + 10 if replace and seq else len(seq)
    size = data.draw(st.integers(0, upper), label="size")
    chosen = data.draw(choices(seq, size=size, replace=replace),
                       label="choices")
    assert set(chosen) <= set(seq), ("choices contains elements that do not "
                                     "belong to `seq`")
    assert len(chosen) == size, "the number of choices does not match `size`"

    if not replace and len(set(seq)) == len(seq):
        unique_choices = sorted(set(chosen))
        assert unique_choices == sorted(chosen), (
Example #22
class TestAdam(hu.HypothesisTestCase):

    @staticmethod
    def ref_adam(param, mom1, mom2, grad, LR, ITER,
                 beta1, beta2, epsilon, output_grad=False):
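        # Bias-correction factor from Adam (Kingma & Ba, 2015); note the
        # learning rate LR is applied later, in the param_out update.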
        t = ITER + 1
        corrected_local_rate = np.sqrt(1 - np.power(beta2, t)) / \
            (1 - np.power(beta1, t))
        mom1_out = (beta1 * mom1) + (1 - beta1) * grad
        mom2_out = (beta2 * mom2) + (1 - beta2) * np.square(grad)
        grad_out = corrected_local_rate * mom1_out / \
            (np.sqrt(mom2_out) + epsilon)
        param_out = param + LR * grad_out
        if output_grad:
            return param_out, mom1_out, mom2_out, grad_out
        else:
            return param_out, mom1_out, mom2_out

    @staticmethod
    def ref_row_wise_adam(param, mom1, mom2, grad, LR, ITER,
                          beta1, beta2, epsilon):
        t = ITER + 1
        corrected_local_rate = LR * np.sqrt(1 - np.power(beta2, t)) / \
            (1 - np.power(beta1, t))
        mom1_out = (beta1 * mom1) + (1 - beta1) * grad
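        # Row-wise variant: the second moment below is a single scalar per
        # parameter row, averaging the squared gradient over the row.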
        mom2_out = (beta2 * mom2) + (1 - beta2) * np.mean(np.square(grad))
        param_out = param + corrected_local_rate * mom1_out / \
            (np.sqrt(mom2_out) + epsilon)
        return (param_out, mom1_out, mom2_out)

    @given(inputs=hu.tensors(n=4),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           beta1=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           beta2=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **hu.gcs)
    def test_adam(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
        param, mom1, mom2, grad = inputs
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)

        op = core.CreateOperator(
            "Adam",
            ["param", "mom1", "mom2", "grad", "lr", "iter"],
            ["output_param", "output_mom1", "output_mom2"],
            beta1=beta1, beta2=beta2, epsilon=epsilon)

        # Iter lives on the CPU
        input_device_options = {'iter': hu.cpu_do}

        self.assertReferenceChecks(
            gc, op,
            [param, mom1, mom2, grad, LR, ITER],
            functools.partial(
                self.ref_adam,
                beta1=beta1, beta2=beta2, epsilon=epsilon),
            input_device_options=input_device_options)

    @given(inputs=hu.tensors(n=4),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           beta1=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           beta2=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    def test_adam_output_grad(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
        param, mom1, mom2, grad = inputs
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)

        op = core.CreateOperator(
            "Adam",
            ["param", "mom1", "mom2", "grad", "lr", "iter"],
            ["output_param", "output_mom1", "output_mom2", "output_grad"],
            beta1=beta1, beta2=beta2, epsilon=epsilon)

        # Iter lives on the CPU
        input_device_options = {'iter': hu.cpu_do}

        self.assertReferenceChecks(
            gc, op,
            [param, mom1, mom2, grad, LR, ITER],
            functools.partial(
                self.ref_adam,
                beta1=beta1, beta2=beta2, epsilon=epsilon, output_grad=True),
            input_device_options=input_device_options)

    @given(inputs=hu.tensors(n=4),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           beta1=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           beta2=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           data_strategy=st.data(),
           **hu.gcs)
    def test_sparse_adam(self, inputs, ITER, LR, beta1, beta2, epsilon,
                         data_strategy, gc, dc):
        param, mom1, mom2, grad = inputs
        mom2 = np.absolute(mom2)
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)

        # Create an indexing array containing values which index into grad
        indices = data_strategy.draw(
            hu.tensor(
                max_dim=1,
                min_value=1,
                max_value=grad.shape[0],
                dtype=np.int64,
                elements=st.sampled_from(np.arange(grad.shape[0])),
            ),
        )

        # Verify that the generated indices are unique
        hypothesis.assume(
            np.array_equal(
                np.unique(indices.flatten()),
                np.sort(indices.flatten())))

        # Sparsify grad
        grad = grad[indices]

        op = core.CreateOperator(
            "SparseAdam",
            ["param", "mom1", "mom2", "indices", "grad", "lr", "iter"],
            ["param", "mom1", "mom2"],
            beta1=beta1, beta2=beta2, epsilon=epsilon)

        def ref_sparse(param, mom1, mom2, indices, grad, LR, ITER):
            param_out = np.copy(param)
            mom1_out = np.copy(mom1)
            mom2_out = np.copy(mom2)

            for i, index in enumerate(indices):
                param_out[index], mom1_out[index], mom2_out[index] = \
                    self.ref_adam(param[index], mom1[index], mom2[index],
                                  grad[i], LR, ITER,
                                  beta1, beta2, epsilon)
            return (param_out, mom1_out, mom2_out)

        # Iter lives on the CPU
        input_device_options = {'iter': hu.cpu_do}

        self.assertReferenceChecks(
            gc, op,
            [param, mom1, mom2, indices, grad, LR, ITER],
            ref_sparse,
            input_device_options=input_device_options)

    @given(inputs=hu.tensors(n=3),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           beta1=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           beta2=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           data_strategy=st.data(),
           **hu.gcs_cpu_only)
    def test_row_wise_sparse_adam(self, inputs, ITER, LR, beta1, beta2, epsilon,
                                  data_strategy, gc, dc):
        param, mom1, grad = inputs
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)

        # Create a 1D row-wise average 2nd moment tensor.
        mom2 = data_strategy.draw(
            hu.tensor1d(min_len=param.shape[0], max_len=param.shape[0],
                        elements=hu.elements_of_type(dtype=np.float32))
        )
        mom2 = np.absolute(mom2)

        # Create an indexing array containing values which index into grad
        indices = data_strategy.draw(
            hu.tensor(
                max_dim=1,
                min_value=1,
                max_value=grad.shape[0],
                dtype=np.int64,
                elements=st.sampled_from(np.arange(grad.shape[0])),
            ),
        )

        # Note that unlike SparseAdam, RowWiseSparseAdam uses a moment
        # tensor that is strictly 1-dimensional and equal in length to the
        # first dimension of the parameters, so indices must also be
        # 1-dimensional.
        indices = indices.flatten()

        hypothesis.note('indices.shape: %s' % str(indices.shape))

        # Verify that the generated indices are unique
        hypothesis.assume(np.array_equal(np.unique(indices), np.sort(indices)))

        # Sparsify grad
        grad = grad[indices]

        op = core.CreateOperator(
            "RowWiseSparseAdam",
            ["param", "mom1", "mom2", "indices", "grad", "lr", "iter"],
            ["param", "mom1", "mom2"],
            beta1=beta1, beta2=beta2, epsilon=epsilon)

        def ref_row_wise_sparse(param, mom1, mom2, indices, grad, LR, ITER):
            param_out = np.copy(param)
            mom1_out = np.copy(mom1)
            mom2_out = np.copy(mom2)
            for i, index in enumerate(indices):
                param_out[index], mom1_out[index], mom2_out[index] = \
                    self.ref_row_wise_adam(param[index], mom1[index], mom2[index],
                                           grad[i], LR, ITER,
                                           beta1, beta2, epsilon)
            return (param_out, mom1_out, mom2_out)

        # Iter lives on the CPU
        input_device_options = {'iter': hu.cpu_do}

        self.assertReferenceChecks(
            gc, op,
            [param, mom1, mom2, indices, grad, LR, ITER],
            ref_row_wise_sparse,
            input_device_options=input_device_options)
Example #23
class TestMomentumSGD(serial.SerializedTestCase):
    @serial.given(n=st.integers(4, 8), nesterov=st.booleans(), **hu.gcs)
    def test_momentum_sgd(self, n, nesterov, gc, dc):
        param = np.random.rand(n).astype(np.float32)
        grad = np.random.rand(n).astype(np.float32)
        lr = np.random.rand(1).astype(np.float32)
        param_momentum = np.random.rand(n).astype(np.float32)
        momentum = 0.9

        def momentum_sgd(grad, param_momentum, lr, param=None):
            if not nesterov:
                adjusted_gradient = lr * grad + momentum * param_momentum
                if param is None:
                    return [adjusted_gradient, adjusted_gradient]
                else:
                    paramup = param - adjusted_gradient
                    return [adjusted_gradient, adjusted_gradient, paramup]
            else:
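                # Nesterov momentum: take the lookahead step
                # (1 + mu) * m_new - mu * m_old instead of m_new itself.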
                m_new = momentum * param_momentum + lr * grad
                grad_new = (1 + momentum) * m_new - momentum * param_momentum
                if param is None:
                    return [grad_new, m_new]
                else:
                    paramup = param - grad_new
                    return [grad_new, m_new, paramup]

        op = core.CreateOperator(
            "MomentumSGDUpdate",
            ["grad", "param_momentum", "lr", "param"],
            ["grad", "param_momentum", "param"],
            momentum=momentum,
            nesterov=int(nesterov),
        )

        self.assertReferenceChecks(device_option=gc,
                                   op=op,
                                   inputs=[grad, param_momentum, lr, param],
                                   reference=momentum_sgd)

        op_noparam = core.CreateOperator(
            "MomentumSGD",
            ["grad", "param_momentum", "lr"],
            ["grad", "param_momentum"],
            momentum=momentum,
            nesterov=int(nesterov),
        )

        self.assertReferenceChecks(device_option=gc,
                                   op=op_noparam,
                                   inputs=[grad, param_momentum, lr],
                                   reference=momentum_sgd)

    @serial.given(inputs=hu.tensors(n=3),
                  momentum=st.floats(min_value=0.1, max_value=0.9),
                  nesterov=st.booleans(),
                  lr=st.floats(min_value=0.1, max_value=0.9),
                  data_strategy=st.data(),
                  **hu.gcs)
    def test_sparse_momentum_sgd(self, inputs, momentum, nesterov, lr,
                                 data_strategy, gc, dc):
        w, grad, m = inputs

        # Create an indexing array containing values which index into grad
        indices = data_strategy.draw(
            hu.tensor(
                max_dim=1,
                min_value=1,
                max_value=grad.shape[0],
                dtype=np.int64,
                elements=st.sampled_from(np.arange(grad.shape[0])),
            ), )

        # Verify that the generated indices are unique
        assume(
            np.array_equal(np.unique(indices.flatten()),
                           np.sort(indices.flatten())))

        # Sparsify grad
        grad = grad[indices]

        # Make momentum >= 0
        m = np.abs(m)

        # Convert lr to a numpy array
        lr = np.asarray([lr], dtype=np.float32)

        op = core.CreateOperator("SparseMomentumSGDUpdate",
                                 ["grad", "m", "lr", "param", "indices"],
                                 ["adjusted_grad", "m", "param"],
                                 momentum=momentum,
                                 nesterov=int(nesterov),
                                 device_option=gc)

        # Reference
        def momentum_sgd(grad, m, lr):
            lr = lr[0]
            if not nesterov:
                adjusted_gradient = lr * grad + momentum * m
                return (adjusted_gradient, adjusted_gradient)
            else:
                m_new = momentum * m + lr * grad
                return ((1 + momentum) * m_new - momentum * m, m_new)

        def sparse(grad, m, lr, param, i):
            grad_new, m_new = momentum_sgd(grad, m[i], lr)
            m[i] = m_new
            param[i] -= grad_new
            return (grad_new, m, param)

        self.assertReferenceChecks(gc, op, [grad, m, lr, w, indices], sparse)

    @unittest.skipIf(not workspace.has_gpu_support
                     and not workspace.has_hip_support, "No gpu support.")
    @given(n=st.integers(4, 8), nesterov=st.booleans(), **hu.gcs)
    def test_fp16momentum_sgd(self, n, nesterov, gc, dc):
        assume(core.IsGPUDeviceType(gc.device_type))
        gpuvers = workspace.GetDeviceProperties(0)["major"]
        if gpuvers < 6:
            print(
                "No FP16 support because major version {} < 6".format(gpuvers))
            return

        param = np.random.rand(n).astype(np.float16)
        grad = np.random.rand(n).astype(np.float16)
        lr = np.random.rand(1).astype(np.float32)
        param_momentum = np.random.rand(n).astype(np.float16)
        momentum = 0.9
        nesterov = True

        def momentum_sgd(grad, param_momentum, lr, param=None):
            if not nesterov:
                adjusted_gradient = lr * grad + momentum * param_momentum
                paramup = param - adjusted_gradient
                return [adjusted_gradient, adjusted_gradient, paramup]
            else:
                m_new = momentum * param_momentum + lr * grad
                grad_new = (1 + momentum) * m_new - momentum * param_momentum
                paramup = param - grad_new
                return [grad_new, m_new, paramup]

        op = core.CreateOperator(
            "FP16MomentumSGDUpdate",
            ["grad", "param_momentum", "lr", "param"],
            ["grad", "param_momentum", "param"],
            momentum=momentum,
            nesterov=int(nesterov),
            weight_decay=0.0,
        )

        self.assertReferenceChecks(device_option=gc,
                                   op=op,
                                   inputs=[grad, param_momentum, lr, param],
                                   reference=momentum_sgd)
Example #24
class HypothesisSpec(RuleBasedStateMachine):
    def __init__(self):
        super(HypothesisSpec, self).__init__()
        self.database = None

    strategies = Bundle(u"strategy")
    strategy_tuples = Bundle(u"tuples")
    objects = Bundle(u"objects")
    basic_data = Bundle(u"basic")
    varied_floats = Bundle(u"varied_floats")

    def teardown(self):
        self.clear_database()

    @rule()
    def clear_database(self):
        if self.database is not None:
            self.database.close()
            self.database = None

    @rule()
    def set_database(self):
        self.teardown()
        self.database = ExampleDatabase()

    @rule(
        target=strategies,
        spec=sampled_from((
            integers(),
            booleans(),
            floats(),
            complex_numbers(),
            fractions(),
            decimals(),
            text(),
            binary(),
            none(),
            tuples(),
        )),
    )
    def strategy(self, spec):
        return spec

    @rule(target=strategies, values=lists(integers() | text(), min_size=1))
    def sampled_from_strategy(self, values):
        return sampled_from(values)

    @rule(target=strategies, spec=strategy_tuples)
    def strategy_for_tuples(self, spec):
        return tuples(*spec)

    @rule(target=strategies,
          source=strategies,
          level=integers(1, 10),
          mixer=text())
    def filtered_strategy(self, source, level, mixer):
        def is_good(x):
            return bool(
                Random(
                    hashlib.md5(
                        (mixer + repr(x)).encode(u"utf-8")).digest()).randint(
                            0, level))

        return source.filter(is_good)

    @rule(target=strategies, elements=strategies)
    def list_strategy(self, elements):
        return lists(elements)

    @rule(target=strategies, left=strategies, right=strategies)
    def or_strategy(self, left, right):
        return left | right

    @rule(target=varied_floats, source=floats())
    def float(self, source):
        return source

    @rule(target=varied_floats,
          source=varied_floats,
          offset=integers(-100, 100))
    def adjust_float(self, source, offset):
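        # Nudge the float by reinterpreting its IEEE-754 bit pattern as an
        # integer, offsetting it, and clamping to the valid 64-bit range.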
        return int_to_float(clamp(0, float_to_int(source) + offset, 2**64 - 1))

    @rule(target=strategies, left=varied_floats, right=varied_floats)
    def float_range(self, left, right):
        for f in (math.isnan, math.isinf):
            for x in (left, right):
                assume(not f(x))
        left, right = sorted((left, right))
        assert left <= right
        return floats(left, right)

    @rule(
        target=strategies,
        source=strategies,
        result1=strategies,
        result2=strategies,
        mixer=text(),
        p=floats(0, 1),
    )
    def flatmapped_strategy(self, source, result1, result2, mixer, p):
        assume(result1 is not result2)

        def do_map(value):
            rep = repr(value)
            random = Random(
                hashlib.md5((mixer + rep).encode(u"utf-8")).digest())
            if random.random() <= p:
                return result1
            else:
                return result2

        return source.flatmap(do_map)

    @rule(target=strategies, value=objects)
    def just_strategy(self, value):
        return just(value)

    @rule(target=strategy_tuples, source=strategies)
    def single_tuple(self, source):
        return (source, )

    @rule(target=strategy_tuples, left=strategy_tuples, right=strategy_tuples)
    def cat_tuples(self, left, right):
        return left + right

    @rule(target=objects, strat=strategies, data=data())
    def get_example(self, strat, data):
        data.draw(strat)

    @rule(target=strategies, left=integers(), right=integers())
    def integer_range(self, left, right):
        left, right = sorted((left, right))
        return integers(left, right)

    @rule(strat=strategies)
    def repr_is_good(self, strat):
        assert u" at 0x" not in repr(strat)
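
To actually run the state machine above, hypothesis generates a
unittest-compatible test case from any RuleBasedStateMachine subclass,
which a test runner can collect:

TestHypothesisSpec = HypothesisSpec.TestCase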
Example #25
class ParameterPropertiesTest(test_util.TestCase):
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings()
    def testCanConstructAndSampleDistribution(self, data):

        # TODO(b/169874884): Implement `width` parameters to work around the need
        # for a `high > low` joint constraint.
        # NormalInverseGaussian needs |skewness| < tailweight
        high_gt_low_constraint_dists = ('Bates', 'NormalInverseGaussian',
                                        'PERT', 'Triangular',
                                        'TruncatedCauchy', 'TruncatedNormal',
                                        'Uniform')
        not_annotated_dists = (
            'Empirical|event_ndims=0',
            'Empirical|event_ndims=1',
            'Empirical|event_ndims=2',
            'FiniteDiscrete',
            # cov_perturb_factor is not annotated since its shape
            # could be a vector or a matrix.
            'MultivariateNormalDiagPlusLowRankCovariance',
            'MultivariateStudentTLinearOperator',
            'PoissonLogNormalQuadratureCompound',
            'StoppingRatioLogistic',
        )
        non_trainable_dists = (high_gt_low_constraint_dists +
                               not_annotated_dists +
                               dhps.INSTANTIABLE_META_DISTS)

        non_trainable_tensor_params = (
            'atol',
            'rtol',
            'eigenvectors',  # TODO(b/171872834): DeterminantalPointProcess
            'total_count',
            'num_samples',
            'df',  # Can't represent constraint that Wishart df > dimension.
            'mean_direction')  # TODO(b/118492439): Add `UnitVector` bijector.
        non_trainable_non_tensor_params = (
            'batch_shape',  # SphericalUniform, at least, has explicit batch shape
            'dimension',
            'dtype')

        dist = data.draw(
            dhps.distributions(eligibility_filter=(
                lambda name: name not in non_trainable_dists)))
        sample_shape = tuple(
            self.evaluate(
                tf.concat(
                    [dist.batch_shape_tensor(),
                     dist.event_shape_tensor()],
                    axis=0)))

        params = type(dist).parameter_properties(num_classes=2)
        params64 = type(dist).parameter_properties(dtype=tf.float64,
                                                   num_classes=2)

        new_parameters = {}
        seeds = {
            k: s
            for (k, s) in zip(
                params.keys(),
                samplers.split_seed(test_util.test_seed(), n=len(params)))
        }
        for param_name, param in params.items():
            if param_name in non_trainable_tensor_params:
                new_parameters[param_name] = dist.parameters[param_name]
            elif param.is_preferred:
                b = param.default_constraining_bijector_fn()
                unconstrained_shape = (b.inverse_event_shape_tensor(
                    param.shape_fn(sample_shape=sample_shape)))
                unconstrained_param = samplers.normal(unconstrained_shape,
                                                      seed=seeds[param_name])
                new_parameters[param_name] = b.forward(unconstrained_param)

                # Check that passing a float64 `eps` works with float64 parameters.
                b_float64 = params64[
                    param_name].default_constraining_bijector_fn()
                b_float64(tf.cast(unconstrained_param, tf.float64))

        # Copy over any non-Tensor parameters.
        new_parameters.update({
            k: v
            for (k, v) in dist.parameters.items()
            if k in non_trainable_non_tensor_params
        })

        # Sanity check that we got valid parameters.
        new_parameters['validate_args'] = True
        new_dist = type(dist)(**new_parameters)
        x = self.evaluate(new_dist.sample(seed=test_util.test_seed()))
        self.assertEqual(sample_shape, x.shape)

        # Valid parameters should give non-nan samples.
        self.assertAllFalse(np.isnan(x))

    @parameterized.named_parameters({
        'testcase_name': dname,
        'dist_name': dname
    } for dname in sorted(
        list(dhps.INSTANTIABLE_BASE_DISTS.keys()) +
        list(dhps.INSTANTIABLE_META_DISTS)))
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings()
    def testInferredBatchShapeMatchesTrueBatchShape(self, dist_name, data):
        with tfp_hps.no_cholesky_decomposition_errors():
            dist = data.draw(
                dhps.distributions(dist_name=dist_name, validate_args=False))
            with tfp_hps.no_tf_rank_errors():
                lp = dist.log_prob(dist.sample(seed=test_util.test_seed()))

        self.assertAllEqual(dist.batch_shape_tensor(), tf.shape(lp))
        self.assertAllEqual(dist.batch_shape, tf.shape(lp))
Example #26
class EventSpaceBijectorsTest(test_util.TestCase, dhps.TestCase):
    def check_event_space_bijector_constrains(self, dist, data):
        event_space_bijector = dist.experimental_default_event_space_bijector()
        if event_space_bijector is None:
            return

        # Draw a sample shape
        sample_shape = data.draw(tfp_hps.shapes())
        inv_event_shape = event_space_bijector.inverse_event_shape(
            tensorshape_util.concatenate(dist.batch_shape, dist.event_shape))

        # Draw a shape that broadcasts with `[batch_shape, inverse_event_shape]`
        # where `inverse_event_shape` is the event shape in the bijector's
        # domain. This is the shape of `y` in R**n, such that
        # x = event_space_bijector(y) has the event shape of the distribution.

        # TODO(b/174778703): Actually draw broadcast compatible shapes.
        batch_inv_event_compat_shape = inv_event_shape
        # batch_inv_event_compat_shape = data.draw(
        #     tfp_hps.broadcast_compatible_shape(inv_event_shape))
        # batch_inv_event_compat_shape = tensorshape_util.concatenate(
        #     (1,) * (len(inv_event_shape) - len(batch_inv_event_compat_shape)),
        #     batch_inv_event_compat_shape)

        total_sample_shape = tensorshape_util.concatenate(
            sample_shape, batch_inv_event_compat_shape)
        # full_sample_batch_event_shape = tensorshape_util.concatenate(
        #     sample_shape, inv_event_shape)

        y = data.draw(
            tfp_hps.constrained_tensors(tfp_hps.identity_fn,
                                        total_sample_shape.as_list()))
        hp.note('Trying to constrain inputs {}'.format(y))
        with tfp_hps.no_tf_rank_errors():
            x = event_space_bijector(y)
            hp.note('Got constrained samples {}'.format(x))
            with tf.control_dependencies(dist._sample_control_dependencies(x)):
                self.evaluate(tensor_util.identity_as_tensor(x))

            # TODO(b/158874412): Verify DoF changing default bijectors.
            # y_bc = tf.broadcast_to(y, full_sample_batch_event_shape)
            # x_bc = event_space_bijector(y_bc)
            # self.assertAllClose(x, x_bc)
            # fldj = event_space_bijector.forward_log_det_jacobian(y)
            # fldj_bc = event_space_bijector.forward_log_det_jacobian(y_bc)
            # self.assertAllClose(fldj, fldj_bc)

    @parameterized.named_parameters({
        'testcase_name': dname,
        'dist_name': dname
    } for dname in sorted(
        list(
            set(dhps.INSTANTIABLE_BASE_DISTS.keys()) -
            set(EVENT_SPACE_BIJECTOR_IS_BROKEN))))
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings()
    def testDistributionWithVars(self, dist_name, data):
        dist = data.draw(
            dhps.base_distributions(dist_name=dist_name, enable_vars=True))
        self.evaluate([var.initializer for var in dist.variables])
        self.assume_loc_scale_ok(dist)
        self.check_event_space_bijector_constrains(dist, data)

    # TODO(b/146572907): Fix `enable_vars` for metadistributions and
    # fold these two tests into one.
    @parameterized.named_parameters({
        'testcase_name': dname,
        'dist_name': dname
    } for dname in sorted(
        list(
            set(dhps.INSTANTIABLE_META_DISTS) -
            set(EVENT_SPACE_BIJECTOR_IS_BROKEN))))
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings()
    def testDistributionNoVars(self, dist_name, data):
        def ok(name):
            return name not in EVENT_SPACE_BIJECTOR_IS_BROKEN

        dist = data.draw(
            dhps.distributions(dist_name=dist_name,
                               enable_vars=False,
                               eligibility_filter=ok))
        self.assume_loc_scale_ok(dist)
        self.check_event_space_bijector_constrains(dist, data)
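
A minimal standalone sketch of the property this test exercises, assuming TensorFlow Probability is available and using tfd.Beta purely as an illustration: unconstrained reals pushed through a distribution's default event-space bijector land in its support, so log_prob is well defined on the result.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

dist = tfd.Beta(concentration1=2., concentration0=3.)
bijector = dist.experimental_default_event_space_bijector()

y = tf.random.normal([5])  # unconstrained values in R
x = bijector(y)            # mapped into (0, 1), the Beta support
lp = dist.log_prob(x)      # finite log-probs, since x is in-support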
Example #27
def vec_int_unary(test_func, unary_func, type):
    return pytest.mark.parametrize('func,type', [
        (unary_func, type)
    ])(given(data=st.data())(test_func))
                     min_side + side_range))
    assert len(smallest) == min_dims and all(k == min_side for k in smallest)


@given(scalar_dtypes())
def test_can_generate_scalar_dtypes(dtype):
    assert isinstance(dtype, np.dtype)


@given(nested_dtypes())
def test_can_generate_compound_dtypes(dtype):
    assert isinstance(dtype, np.dtype)


@given(nested_dtypes(max_itemsize=settings.default.buffer_size // 10),
       st.data())
def test_infer_strategy_from_dtype(dtype, data):
    # Given a dtype
    assert isinstance(dtype, np.dtype)
    # We can infer a strategy
    strat = from_dtype(dtype)
    assert isinstance(strat, SearchStrategy)
    # And use it to fill an array of that dtype
    data.draw(arrays(dtype, 10, strat))


@given(nested_dtypes())
def test_np_dtype_is_idempotent(dtype):
    assert dtype == np.dtype(dtype)

        columns = draw(st.integers(1, 10))
        return [
            [draw(st.integers(0, 10000)) for _ in range(columns)] for _ in range(rows)
        ]

    def transpose(m):
        return [[row[i] for row in m] for i in range(len(m[0]))]

    def is_square(m):
        return len(m) == len(m[0])

    value = minimal(matrix(), lambda m: is_square(m) and transpose(m) != m)
    assert len(value) == 2
    assert len(value[0]) == 2
    assert sorted(value[0] + value[1]) == [0, 0, 0, 1]


class MyList(list):
    pass


@given(st.data(), st.lists(st.integers()).map(MyList))
def test_does_not_change_arguments(data, ls):
    # regression test for issue #1017 or other argument mutation
    @st.composite
    def strat(draw, arg):
        return arg

    ex = data.draw(strat(ls))
    assert ex is ls
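
For reference, a minimal sketch of the @st.composite pattern used above, where later draws can depend on earlier ones:

import hypothesis.strategies as st

@st.composite
def ordered_pairs(draw):
    x = draw(st.integers())
    y = draw(st.integers(min_value=x))  # the second draw depends on the first
    return (x, y)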
    ]
    th = TestHarness(32, 4, msgs)
    run_sim(th, cmdline_opts, max_cycles=40)


#-------------------------------------------------------------------------
# test case: pyh2
#-------------------------------------------------------------------------


@hypothesis.settings(deadline=None,
                     max_examples=100 if 'CI' not in os.environ else 5)
@hypothesis.given(
    in_nbits=st.integers(1, 64),
    max_nblocks=st.integers(2, 15),
    data=st.data(),
)
def test_pyh2(in_nbits, max_nblocks, data, cmdline_opts):
    len_msgs = data.draw(
        st.lists(st.integers(1, max_nblocks), min_size=1, max_size=100))
    src_msgs = [
        data.draw(st.integers(0, 2**(x * in_nbits) - 1)) for x in len_msgs
    ]

    msgs = []
    for x, l in zip(src_msgs, len_msgs):
        msgs.append(x)
        msgs.append(l)

    th = TestHarness(in_nbits, max_nblocks, msgs)
    run_sim(th, cmdline_opts)
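
The test above uses st.data() to size each message by a previously drawn length. A minimal sketch of that dependent-draw pattern, with hypothetical bounds chosen only for illustration:

from hypothesis import given, strategies as st

@given(st.data())
def test_dependent_draws(data):
    nbits = data.draw(st.integers(1, 8))
    lengths = data.draw(st.lists(st.integers(1, 4), min_size=1, max_size=5))
    # Each payload's width depends on the length drawn for it.
    payloads = [data.draw(st.integers(0, 2 ** (n * nbits) - 1)) for n in lengths]
    assert len(payloads) == len(lengths)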
class DistributionSlicingTest(test_util.TestCase):
    def _test_slicing(self, data, dist):
        strm = test_util.test_seed_stream()
        batch_shape = dist.batch_shape
        slices = data.draw(dhps.valid_slices(batch_shape))
        slice_str = 'dist[{}]'.format(', '.join(dhps.stringify_slices(slices)))
        # Make sure the slice string appears in Hypothesis' attempted example log
        hp.note('Using slice ' + slice_str)
        if not slices:  # Nothing further to check.
            return
        sliced_zeros = np.zeros(batch_shape)[slices]
        sliced_dist = dist[slices]
        hp.note('Using sliced distribution {}.'.format(sliced_dist))

        # Check that slicing modifies batch shape as expected.
        self.assertAllEqual(sliced_zeros.shape, sliced_dist.batch_shape)

        if not sliced_zeros.size:
            # TODO(b/128924708): Fix distributions that fail on degenerate empty
            #     shapes, e.g. Multinomial, DirichletMultinomial, ...
            return

        # Check that sampling of sliced distributions executes.
        with tfp_hps.no_tf_rank_errors():
            samples = self.evaluate(dist.sample(seed=strm()))
            sliced_dist_samples = self.evaluate(
                sliced_dist.sample(seed=strm()))

        # Come up with the slices for samples (which must also include event dims).
        sample_slices = (tuple(slices) if isinstance(
            slices, collections.abc.Sequence) else (slices, ))
        if Ellipsis not in sample_slices:
            sample_slices += (Ellipsis, )
        sample_slices += tuple([slice(None)] *
                               tensorshape_util.rank(dist.event_shape))

        sliced_samples = samples[sample_slices]

        # Report sub-sliced samples (on which we compare log_prob) to hypothesis.
        hp.note('Sample(s) for testing log_prob ' + str(sliced_samples))

        # Check that sampling a sliced distribution produces the same shape as
        # slicing the samples from the original.
        self.assertAllEqual(sliced_samples.shape, sliced_dist_samples.shape)

        # Check that a sliced distribution can compute the log_prob of its own
        # samples (up to numerical validation errors).
        with tfp_hps.no_tf_rank_errors():
            try:
                lp = self.evaluate(dist.log_prob(samples))
            except tf.errors.InvalidArgumentError:
                # TODO(b/129271256): d.log_prob(d.sample()) should not fail
                #     validate_args checks.
                # We only tolerate this case for the non-sliced dist.
                return
            sliced_lp = self.evaluate(sliced_dist.log_prob(sliced_samples))

        # Check that the sliced dist's log_prob agrees with slicing the original's
        # log_prob.

        # This `hp.assume` is suppressing array sizes that cause the sliced and
        # non-sliced distribution to follow different Eigen code paths.  Those
        # different code paths lead to arbitrarily large variations in the results
        # at parameter settings that Hypothesis is all too good at finding.  Since
        # the purpose of this test is just to check that we got slicing right, those
        # discrepancies are a distraction.
        # TODO(b/140229057): Remove this `hp.assume`, if and when Eigen's numerics
        # become index-independent.
        all_packetized = (_all_packetized(dist)
                          and _all_packetized(sliced_dist)
                          and _all_packetized(samples)
                          and _all_packetized(sliced_samples))
        hp.note('Packetization check {}'.format(all_packetized))
        all_non_packetized = (_all_non_packetized(dist)
                              and _all_non_packetized(sliced_dist)
                              and _all_non_packetized(samples)
                              and _all_non_packetized(sliced_samples))
        hp.note('Non-packetization check {}'.format(all_non_packetized))
        hp.assume(all_packetized or all_non_packetized)

        self.assertAllClose(lp[slices], sliced_lp, rtol=1e-5)

    def _run_test(self, data):
        def ok(name):
            return name not in INSTANTIABLE_BUT_NOT_SLICABLE

        dist = data.draw(
            dhps.distributions(enable_vars=False, eligibility_filter=ok))

        # Check that all distributions still register as non-iterable despite
        # defining __getitem__.  (Because __getitem__ magically makes an object
        # iterable for some reason.)
        with self.assertRaisesRegexp(TypeError, 'not iterable'):
            iter(dist)

        # Test slicing
        self._test_slicing(data, dist)

        # TODO(bjp): Enable sampling and log_prob checks. Currently, too many errors
        #     from out-of-domain samples.
        # self.evaluate(dist.log_prob(dist.sample(seed=test_util.test_seed())))

    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings()
    def testDistributions(self, data):
        self._run_test(data)

    def disabled_testFailureCase(self):
        # TODO(b/140229057): This test should pass.
        dist = tfd.Chi(df=np.float32(27.744131))
        dist = tfd.TransformedDistribution(bijector=tfb.NormalCDF(),
                                           distribution=dist,
                                           batch_shape=[4])
        dist = tfb.Expm1()(dist)
        samps = 1.7182817 + tf.zeros_like(
            dist.sample(seed=test_util.test_seed()))
        self.assertAllClose(
            dist.log_prob(samps)[0], dist[0].log_prob(samps[0]))
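
The slice-extension logic in _test_slicing (batch slices, then Ellipsis, then one slice(None) per event dimension) can be checked in isolation. A minimal NumPy sketch with illustrative shapes:

import numpy as np

batch_slices = (slice(1, 3),)  # slices into the batch dimensions only
event_rank = 2                 # rank of the event shape

sample_slices = batch_slices
if Ellipsis not in sample_slices:
    sample_slices += (Ellipsis,)
sample_slices += (slice(None),) * event_rank

samples = np.zeros([4, 6, 7])  # batch_shape=[4], event_shape=[6, 7]
assert samples[sample_slices].shape == (2, 6, 7)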
class BijectorPropertiesTest(test_util.TestCase):
    def _draw_bijector(self,
                       bijector_name,
                       data,
                       batch_shape=None,
                       allowed_bijectors=None,
                       validate_args=True):
        event_dim = data.draw(hps.integers(min_value=2, max_value=6))
        bijector = data.draw(
            bijectors(bijector_name=bijector_name,
                      event_dim=event_dim,
                      enable_vars=True,
                      batch_shape=batch_shape,
                      allowed_bijectors=allowed_bijectors,
                      validate_args=validate_args))
        self.evaluate(tf.group(*[v.initializer for v in bijector.variables]))
        return bijector, event_dim

    def _draw_domain_tensor(self, bijector, data, event_dim, sample_shape=()):
        # TODO(axch): Would be nice to get rid of all this shape inference logic and
        # just rely on a notion of batch and event shape for bijectors, so we can
        # pass those through `domain_tensors` and `codomain_tensors` and use
        # `tensors_in_support`.  However, `RationalQuadraticSpline` behaves weirdly
        # somehow and I got confused.
        codomain_event_shape = [event_dim] * bijector.inverse_min_event_ndims
        codomain_event_shape = constrain_inverse_shape(bijector,
                                                       codomain_event_shape)
        shp = bijector.inverse_event_shape(codomain_event_shape)
        shp = functools.reduce(tensorshape_util.concatenate, [
            sample_shape,
            data.draw(
                tfp_hps.broadcast_compatible_shape(
                    shp[:shp.ndims - bijector.forward_min_event_ndims])),
            shp[shp.ndims - bijector.forward_min_event_ndims:]
        ])
        xs = tf.identity(data.draw(domain_tensors(bijector, shape=shp)),
                         name='xs')

        return xs

    def _draw_codomain_tensor(self, bijector, data, event_dim,
                              sample_shape=()):
        return self._draw_domain_tensor(tfb.Invert(bijector),
                                        data=data,
                                        event_dim=event_dim,
                                        sample_shape=sample_shape)

    @parameterized.named_parameters({
        'testcase_name': bname,
        'bijector_name': bname
    } for bname in TF2_FRIENDLY_BIJECTORS)
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings()
    def testBijector(self, bijector_name, data):
        tfp_hps.guitar_skip_if_matches('Tanh', bijector_name, 'b/144163991')

        bijector, event_dim = self._draw_bijector(bijector_name, data)

        # Forward mapping: Check differentiation through forward mapping with
        # respect to the input and parameter variables.  Also check that any
        # variables are not referenced overmuch.
        xs = self._draw_domain_tensor(bijector, data, event_dim)
        wrt_vars = [xs] + [
            v for v in bijector.trainable_variables if v.dtype.is_floating
        ]
        with tf.GradientTape() as tape:
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `forward` of {}'.format(bijector)):
                tape.watch(wrt_vars)
                # TODO(b/73073515): Fix graph mode gradients with bijector caching.
                ys = bijector.forward(xs + 0)
        grads = tape.gradient(ys, wrt_vars)
        assert_no_none_grad(bijector, 'forward', wrt_vars, grads)

        # For scalar bijectors, verify correctness of the _is_increasing method.
        # TODO(b/148459057): Except, don't verify Softfloor on Guitar because
        # of numerical problem.
        def exception(bijector):
            if not tfp_hps.running_under_guitar():
                return False
            if isinstance(bijector, tfb.Softfloor):
                return True
            if isinstance(bijector, tfb.Invert):
                return exception(bijector.bijector)
            return False

        if (bijector.forward_min_event_ndims == 0
                and bijector.inverse_min_event_ndims == 0
                and not exception(bijector)):
            dydx = grads[0]
            hp.note('dydx: {}'.format(dydx))
            isfinite = tf.math.is_finite(dydx)
            incr_or_slope_eq0 = bijector._internal_is_increasing() | tf.equal(
                dydx, 0)  # pylint: disable=protected-access
            self.assertAllEqual(
                isfinite & incr_or_slope_eq0,
                isfinite & (dydx >= 0) | tf.zeros_like(incr_or_slope_eq0))

        # FLDJ: Check differentiation through forward log det jacobian with
        # respect to the input and parameter variables.  Also check that any
        # variables are not referenced overmuch.
        event_ndims = data.draw(
            hps.integers(min_value=bijector.forward_min_event_ndims,
                         max_value=xs.shape.ndims))
        with tf.GradientTape() as tape:
            max_permitted = _ldj_tensor_conversions_allowed(bijector,
                                                            is_forward=True)
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `forward_log_det_jacobian` of {}'.format(bijector),
                    max_permissible=max_permitted):
                tape.watch(wrt_vars)
                # TODO(b/73073515): Fix graph mode gradients with bijector caching.
                ldj = bijector.forward_log_det_jacobian(
                    xs + 0, event_ndims=event_ndims)
        grads = tape.gradient(ldj, wrt_vars)
        assert_no_none_grad(bijector, 'forward_log_det_jacobian', wrt_vars,
                            grads)

        # Inverse mapping: Check differentiation through inverse mapping with
        # respect to the codomain "input" and parameter variables.  Also check that
        # any variables are not referenced overmuch.
        ys = self._draw_codomain_tensor(bijector, data, event_dim)
        wrt_vars = [ys] + [
            v for v in bijector.trainable_variables if v.dtype.is_floating
        ]
        with tf.GradientTape() as tape:
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `inverse` of {}'.format(bijector)):
                tape.watch(wrt_vars)
                # TODO(b/73073515): Fix graph mode gradients with bijector caching.
                xs = bijector.inverse(ys + 0)
        grads = tape.gradient(xs, wrt_vars)
        assert_no_none_grad(bijector, 'inverse', wrt_vars, grads)

        # ILDJ: Check differentiation through inverse log det jacobian with respect
        # to the codomain "input" and parameter variables.  Also check that any
        # variables are not referenced overmuch.
        event_ndims = data.draw(
            hps.integers(min_value=bijector.inverse_min_event_ndims,
                         max_value=ys.shape.ndims))
        with tf.GradientTape() as tape:
            max_permitted = _ldj_tensor_conversions_allowed(bijector,
                                                            is_forward=False)
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `inverse_log_det_jacobian` of {}'.format(bijector),
                    max_permissible=max_permitted):
                tape.watch(wrt_vars)
                # TODO(b/73073515): Fix graph mode gradients with bijector caching.
                ldj = bijector.inverse_log_det_jacobian(
                    ys + 0, event_ndims=event_ndims)
        grads = tape.gradient(ldj, wrt_vars)
        assert_no_none_grad(bijector, 'inverse_log_det_jacobian', wrt_vars,
                            grads)

        # Check that the outputs of forward_dtype and inverse_dtype match the dtypes
        # of the outputs of forward and inverse.
        self.assertAllEqualNested(ys.dtype, bijector.forward_dtype(xs.dtype))
        self.assertAllEqualNested(xs.dtype, bijector.inverse_dtype(ys.dtype))

    @parameterized.named_parameters({
        'testcase_name': bname,
        'bijector_name': bname
    } for bname in (set(TF2_FRIENDLY_BIJECTORS) -
                    set(AUTOVECTORIZATION_IS_BROKEN)))
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings()
    def testAutoVectorization(self, bijector_name, data):

        # TODO(b/150161911): reconcile numeric behavior of eager and graph mode.
        if tf.executing_eagerly():
            return

        bijector, event_dim = self._draw_bijector(
            bijector_name,
            data,
            batch_shape=[],  # Avoid conflict with vmap sample dimension.
            validate_args=False,  # Work around lack of `If` support in vmap.
            allowed_bijectors=(set(TF2_FRIENDLY_BIJECTORS) -
                               set(AUTOVECTORIZATION_IS_BROKEN)))
        atol = AUTOVECTORIZATION_ATOL[bijector_name]
        rtol = AUTOVECTORIZATION_RTOL[bijector_name]

        # Forward
        n = 3
        xs = self._draw_domain_tensor(bijector,
                                      data,
                                      event_dim,
                                      sample_shape=[n])
        ys = bijector.forward(xs)
        vectorized_ys = tf.vectorized_map(bijector.forward, xs)
        self.assertAllClose(*self.evaluate((ys, vectorized_ys)),
                            atol=atol,
                            rtol=rtol)

        # FLDJ
        event_ndims = data.draw(
            hps.integers(min_value=bijector.forward_min_event_ndims,
                         max_value=prefer_static.rank_from_shape(xs.shape) -
                         1))
        fldj_fn = functools.partial(bijector.forward_log_det_jacobian,
                                    event_ndims=event_ndims)
        vectorized_fldj = tf.vectorized_map(fldj_fn, xs)
        fldj = tf.broadcast_to(fldj_fn(xs), tf.shape(vectorized_fldj))
        self.assertAllClose(*self.evaluate((fldj, vectorized_fldj)),
                            atol=atol,
                            rtol=rtol)

        # Inverse
        ys = self._draw_codomain_tensor(bijector,
                                        data,
                                        event_dim,
                                        sample_shape=[n])
        xs = bijector.inverse(ys)
        vectorized_xs = tf.vectorized_map(bijector.inverse, ys)
        self.assertAllClose(*self.evaluate((xs, vectorized_xs)),
                            atol=atol,
                            rtol=rtol)

        # ILDJ
        event_ndims = data.draw(
            hps.integers(min_value=bijector.inverse_min_event_ndims,
                         max_value=prefer_static.rank_from_shape(ys.shape) -
                         1))
        ildj_fn = functools.partial(bijector.inverse_log_det_jacobian,
                                    event_ndims=event_ndims)
        vectorized_ildj = tf.vectorized_map(ildj_fn, ys)
        ildj = tf.broadcast_to(ildj_fn(ys), tf.shape(vectorized_ildj))
        self.assertAllClose(*self.evaluate((ildj, vectorized_ildj)),
                            atol=atol,
                            rtol=rtol)
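
The autovectorization checks above compare batched calls against tf.vectorized_map. A minimal sketch of the same consistency check on a plain elementwise function:

import tensorflow as tf

xs = tf.random.normal([3, 4])
batched = tf.math.softplus(xs)                        # applied to the full batch
vectorized = tf.vectorized_map(tf.math.softplus, xs)  # applied row by row
tf.debugging.assert_near(batched, vectorized)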
Example #33
class Eq(Type):
    """Equality and inequality comparison

    Minimal complete definition:

    ..

        (__eq__ | __ne__) & sample_type

    Minimal complete definition for type constructors:

    ..

        (__eq_generic__ | (__eq_test__ & (__eq__ | __ne__))) & sample_eq_type

    """
    def __eq__(self, other):
        """Equality comparison: ``Eq a => a -> a -> bool``

        Can be used as ``==`` operator.

        The default implementation uses ``__ne__``.

        """
        return not self.__ne__(other)

    def __ne__(self, other):
        """Inequality comparison: ``Eq a => a -> a -> bool``

        Can be used as ``!=`` operator.

        The default implementation uses ``__eq__``.

        """
        return not self.__eq__(other)

    #
    # Sampling functions for property tests
    #

    @class_function
    def sample_eq_type(cls):
        """Sample Eq type

        By default, assume that the type is always Eq. Subclasses should
        override this when needed, for instance, if a type from a type
        constructor is Eq only if it's type argument is Eq (e.g., Maybe)

        """
        return cls.sample_type()

    #
    # Test typeclass laws
    #

    @class_function
    def assert_eq_reflexivity(cls, x):
        assert (x == x) is True
        return

    @class_function
    @given(st.data())
    def test_eq_reflexivity(cls, data):
        """Test ``x == x = True``"""
        a = data.draw(cls.sample_eq_type())
        x = data.draw(a)
        cls.assert_eq_reflexivity(x)
        return

    @class_function
    def assert_eq_symmetry(cls, x, y):
        assert (x == y) == (y == x)
        return

    @class_function
    @given(st.data())
    def test_eq_symmetry(cls, data):
        """Test ``x == y = y == x``"""
        a = data.draw(cls.sample_eq_type())
        x = data.draw(a)
        y = data.draw(a)
        cls.assert_eq_symmetry(x, y)
        return

    @class_function
    def assert_eq_transitivity(cls, x, y, z):
        cond = (x == y) and (y == z)
        then = (x == z)
        assert (cond and then) or (not cond)
        return

    @class_function
    @given(st.data())
    def test_eq_transitivity(cls, data):
        """Test if ``x == y && y == z = True``, then ``x == z = True``"""
        a = data.draw(cls.sample_eq_type())
        x = data.draw(a)
        y = data.draw(a)
        z = data.draw(a)
        cls.assert_eq_transitivity(x, y, z)
        return

    @class_function
    def assert_eq_substitutivity(cls, x, y, f):
        cond = (x == y)
        then = (f(x) == f(y))
        assert (cond and then) or (not cond)
        return

    @class_function
    @given(st.data())
    def test_eq_substitutivity(cls, data):
        """Test if ``x == y = True``, then ``f(x) == f(y) = True``"""

        # Draw types
        a = data.draw(cls.sample_eq_type())
        b = data.draw(testing.sample_eq_type())

        # Draw values
        x = data.draw(a)
        y = data.draw(a)
        f = data.draw(testing.sample_function(b))

        # Note: the only requirement for arbitrary functions is that the input
        # variable has __eq__ implemented. And we have that for Eq type so this
        # test can always be run.
        cls.assert_eq_substitutivity(x, y, f)
        return

    @class_function
    def assert_eq_negation(cls, x, y):
        neq = (x != y)
        eq = (x == y)
        assert (neq == (not eq))
        return

    @class_function
    @given(st.data())
    def test_eq_negation(cls, data):
        """Test ``x != y = not (x == y)``"""
        a = data.draw(cls.sample_eq_type())
        x = data.draw(a)
        y = data.draw(a)
        cls.assert_eq_negation(x, y)
        return

    #
    # Test default implementations
    #

    @class_function
    def assert_eq_eq(cls, x, y):
        assert (x == y) == eq(x, y)
        assert (x == y) == cls.__eq__(x, y)
        return

    @class_function
    @given(st.data())
    def test_eq_eq(cls, data):
        a = data.draw(cls.sample_eq_type())
        x = data.draw(a)
        y = data.draw(a)
        cls.assert_eq_eq(x, y)
        return

    @class_function
    def assert_eq_ne(cls, x, y):
        from haskpy.functions import ne
        assert (x != y) == ne(x, y)
        assert (x != y) == cls.__ne__(x, y)
        return

    @class_function
    @given(st.data())
    def test_eq_ne(cls, data):
        a = data.draw(cls.sample_eq_type())
        x = data.draw(a)
        y = data.draw(a)
        cls.assert_eq_ne(x, y)
        return
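
A hypothetical instance, not taken from haskpy itself, showing the shape of a minimal complete definition: __eq__ plus a sample_type that yields strategies of values, as the law tests above expect. It assumes Eq and the class_function decorator from this module are in scope.

import hypothesis.strategies as st

class IntBox(Eq):
    """Hypothetical wrapper type implementing the Eq interface."""

    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return self.value == other.value

    @class_function
    def sample_type(cls):
        # The law tests first draw a strategy, then draw values from it,
        # so sample_type returns a strategy of strategies.
        return st.just(st.integers().map(IntBox))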
Example #34
    assume(math.isfinite(high))
    values = list(open_erange(series_key, low, high))
    cardinality = series_key
    assert len(values) == cardinality


@given(series_key=sampled_from(ESeries),
       value=floats(min_value=1e-35,
                    max_value=1e35,
                    allow_nan=False,
                    allow_infinity=False))
def test_less_than_or_equal(series_key, value):
    assert find_less_than_or_equal(series_key, value) <= value


@given(data())
def test_less_than_or_equal_returns_value_from_series(data):
    series_key = data.draw(sampled_from(ESeries))
    value = data.draw(sampled_from(series(series_key)))
    assert find_less_than_or_equal(series_key, value) == value


@given(series_key=sampled_from(ESeries),
       value=floats(min_value=1e-35,
                    max_value=1e35,
                    allow_nan=False,
                    allow_infinity=False))
def test_less_than(series_key, value):
    assert find_less_than(series_key, value) < value

Example #35
class TestAdagrad(hu.HypothesisTestCase):
    @staticmethod
    def ref_adagrad(param_in,
                    mom_in,
                    grad,
                    lr,
                    epsilon,
                    using_fp16=False,
                    output_effective_lr=False,
                    output_effective_lr_and_update=False):
        mom_in_f32 = mom_in
        param_in_f32 = param_in
        if using_fp16:
            mom_in_f32 = mom_in.astype(np.float32)
            param_in_f32 = param_in.astype(np.float32)

        # Accumulate squared gradients, then scale the step by
        # lr / (sqrt(accumulator) + epsilon).
        mom_out = mom_in_f32 + np.square(grad)
        effective_lr = lr / (np.sqrt(mom_out) + epsilon)
        grad_adj = effective_lr * grad
        param_out = param_in_f32 + grad_adj

        # Cast every requested output back to the working precision in one place.
        out_dtype = np.float16 if using_fp16 else np.float32
        outputs = (param_out, mom_out)
        if output_effective_lr_and_update:
            outputs = (param_out, mom_out, effective_lr, grad_adj)
        elif output_effective_lr:
            outputs = (param_out, mom_out, effective_lr)
        return tuple(t.astype(out_dtype) for t in outputs)

    @staticmethod
    def ref_row_wise_adagrad(param_in, mom_in, grad, lr, epsilon):
        mom_out = mom_in + np.mean(np.square(grad))
        grad_adj = lr * grad / (np.sqrt(mom_out) + epsilon)
        param_out = param_in + grad_adj
        return (param_out, mom_out)

    @given(inputs=hu.tensors(n=3),
           lr=st.floats(min_value=0.01,
                        max_value=0.99,
                        allow_nan=False,
                        allow_infinity=False),
           epsilon=st.floats(min_value=0.01,
                             max_value=0.99,
                             allow_nan=False,
                             allow_infinity=False),
           **hu.gcs)
    def test_adagrad(self, inputs, lr, epsilon, gc, dc):
        param, momentum, grad = inputs
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(
            "Adagrad",
            ["param", "momentum", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc,
        )

        self.assertReferenceChecks(
            gc, op, [param, momentum, grad, lr],
            functools.partial(self.ref_adagrad, epsilon=epsilon))

    @given(inputs=hu.tensors(n=3),
           lr=st.floats(min_value=0.01,
                        max_value=0.99,
                        allow_nan=False,
                        allow_infinity=False),
           epsilon=st.floats(min_value=0.01,
                             max_value=0.99,
                             allow_nan=False,
                             allow_infinity=False),
           **hu.gcs_cpu_only)
    def test_adagrad_output_effective_lr(self, inputs, lr, epsilon, gc, dc):
        param, momentum, grad = inputs
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(
            "Adagrad",
            ["param", "momentum", "grad", "lr"],
            ["param", "momentum", "effective_lr"],
            epsilon=epsilon,
            device_option=gc,
        )

        self.assertReferenceChecks(
            gc, op, [param, momentum, grad, lr],
            functools.partial(self.ref_adagrad,
                              epsilon=epsilon,
                              output_effective_lr=True))

    @given(inputs=hu.tensors(n=3),
           lr=st.floats(min_value=0.01,
                        max_value=0.99,
                        allow_nan=False,
                        allow_infinity=False),
           epsilon=st.floats(min_value=0.01,
                             max_value=0.99,
                             allow_nan=False,
                             allow_infinity=False),
           **hu.gcs_cpu_only)
    def test_adagrad_output_effective_lr_and_update(self, inputs, lr, epsilon,
                                                    gc, dc):
        param, momentum, grad = inputs
        lr = np.array([lr], dtype=np.float32)

        op = core.CreateOperator(
            "Adagrad",
            ["param", "momentum", "grad", "lr"],
            ["param", "momentum", "effective_lr", "update"],
            epsilon=epsilon,
            device_option=gc,
        )

        self.assertReferenceChecks(
            gc, op, [param, momentum, grad, lr],
            functools.partial(self.ref_adagrad,
                              epsilon=epsilon,
                              output_effective_lr_and_update=True))

    # Suppress filter_too_much health check.
    # Likely caused by `assume` call falling through too often.
    @settings(suppress_health_check=[HealthCheck.filter_too_much])
    @given(inputs=hu.tensors(n=3),
           lr=st.floats(min_value=0.01,
                        max_value=0.99,
                        allow_nan=False,
                        allow_infinity=False),
           epsilon=st.floats(min_value=0.01,
                             max_value=0.99,
                             allow_nan=False,
                             allow_infinity=False),
           data_strategy=st.data(),
           **hu.gcs)
    def test_sparse_adagrad(self, inputs, lr, epsilon, data_strategy, gc, dc):
        param, momentum, grad = inputs
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)

        # Create an indexing array containing values that are lists of indices,
        # which index into grad
        indices = data_strategy.draw(
            hu.tensor(dtype=np.int64,
                      elements=st.sampled_from(np.arange(grad.shape[0]))), )
        hypothesis.note('indices.shape: %s' % str(indices.shape))

        # For now, the indices must be unique
        hypothesis.assume(
            np.array_equal(np.unique(indices.flatten()),
                           np.sort(indices.flatten())))

        # Sparsify grad
        grad = grad[indices]

        op = core.CreateOperator(
            "SparseAdagrad", ["param", "momentum", "indices", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc)

        def ref_sparse(param,
                       momentum,
                       indices,
                       grad,
                       lr,
                       ref_using_fp16=False):
            param_out = np.copy(param)
            momentum_out = np.copy(momentum)
            for i, index in enumerate(indices):
                param_out[index], momentum_out[index] = self.ref_adagrad(
                    param[index],
                    momentum[index],
                    grad[i],
                    lr,
                    epsilon,
                    using_fp16=ref_using_fp16)
            return (param_out, momentum_out)

        ref_using_fp16_values = [False]
        if dc == hu.gpu_do:
            ref_using_fp16_values.append(True)

        for ref_using_fp16 in ref_using_fp16_values:
            if ref_using_fp16:
                print('test_sparse_adagrad with half precision embedding')
                momentum_i = momentum.astype(np.float16)
                param_i = param.astype(np.float16)
            else:
                print('test_sparse_adagrad with full precision embedding')
                momentum_i = momentum.astype(np.float32)
                param_i = param.astype(np.float32)

            self.assertReferenceChecks(
                gc, op,
                [param_i, momentum_i, indices, grad, lr, ref_using_fp16],
                ref_sparse)

    @given(inputs=hu.tensors(n=2),
           lr=st.floats(min_value=0.01,
                        max_value=0.99,
                        allow_nan=False,
                        allow_infinity=False),
           epsilon=st.floats(min_value=0.01,
                             max_value=0.99,
                             allow_nan=False,
                             allow_infinity=False),
           data_strategy=st.data(),
           **hu.gcs)
    def test_sparse_adagrad_empty(self, inputs, lr, epsilon, data_strategy, gc,
                                  dc):
        param, momentum = inputs
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)

        grad = np.empty(shape=(0, ) + param.shape[1:], dtype=np.float32)
        indices = np.empty(shape=(0, ), dtype=np.int64)

        hypothesis.note('indices.shape: %s' % str(indices.shape))

        op = core.CreateOperator(
            "SparseAdagrad", ["param", "momentum", "indices", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc)

        def ref_sparse(param, momentum, indices, grad, lr):
            param_out = np.copy(param)
            momentum_out = np.copy(momentum)
            return (param_out, momentum_out)

        ref_using_fp16_values = [False]
        if dc == hu.gpu_do:
            ref_using_fp16_values.append(True)

        for ref_using_fp16 in ref_using_fp16_values:
            if ref_using_fp16:
                print('test_sparse_adagrad_empty with half precision embedding')
                momentum_i = momentum.astype(np.float16)
                param_i = param.astype(np.float16)
            else:
                print('test_sparse_adagrad_empty with full precision embedding')
                momentum_i = momentum.astype(np.float32)
                param_i = param.astype(np.float32)

            self.assertReferenceChecks(
                gc, op, [param_i, momentum_i, indices, grad, lr], ref_sparse)

    # Suppress filter_too_much health check.
    # Likely caused by `assume` call falling through too often.
    @settings(suppress_health_check=[HealthCheck.filter_too_much])
    @given(inputs=hu.tensors(n=2),
           lr=st.floats(min_value=0.01,
                        max_value=0.99,
                        allow_nan=False,
                        allow_infinity=False),
           epsilon=st.floats(min_value=0.01,
                             max_value=0.99,
                             allow_nan=False,
                             allow_infinity=False),
           data_strategy=st.data(),
           **hu.gcs)
    def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, data_strategy,
                                     gc, dc):
        param, grad = inputs
        lr = np.array([lr], dtype=np.float32)

        # Create a 1D row-wise average sum of squared gradients tensor.
        momentum = data_strategy.draw(
            hu.tensor1d(min_len=param.shape[0],
                        max_len=param.shape[0],
                        elements=hu.elements_of_type(dtype=np.float32)))
        momentum = np.abs(momentum)

        # Create an indexing array containing values which index into grad
        indices = data_strategy.draw(
            hu.tensor(dtype=np.int64,
                      elements=st.sampled_from(np.arange(grad.shape[0]))), )

        # Note that unlike SparseAdagrad, RowWiseSparseAdagrad uses a moment
        # tensor that is strictly 1-dimensional and equal in length to the
        # first dimension of the parameters, so indices must also be
        # 1-dimensional.
        indices = indices.flatten()

        hypothesis.note('indices.shape: %s' % str(indices.shape))

        # The indices must be unique
        hypothesis.assume(np.array_equal(np.unique(indices), np.sort(indices)))

        # Sparsify grad
        grad = grad[indices]

        op = core.CreateOperator(
            "RowWiseSparseAdagrad",
            ["param", "momentum", "indices", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc)

        def ref_row_wise_sparse(param, momentum, indices, grad, lr):
            param_out = np.copy(param)
            momentum_out = np.copy(momentum)
            for i, index in enumerate(indices):
                param_out[index], momentum_out[
                    index] = self.ref_row_wise_adagrad(param[index],
                                                       momentum[index],
                                                       grad[i], lr, epsilon)
            return (param_out, momentum_out)

        self.assertReferenceChecks(gc, op,
                                   [param, momentum, indices, grad, lr],
                                   ref_row_wise_sparse)

    @given(inputs=hu.tensors(n=1),
           lr=st.floats(min_value=0.01,
                        max_value=0.99,
                        allow_nan=False,
                        allow_infinity=False),
           epsilon=st.floats(min_value=0.01,
                             max_value=0.99,
                             allow_nan=False,
                             allow_infinity=False),
           data_strategy=st.data(),
           **hu.gcs)
    def test_row_wise_sparse_adagrad_empty(self, inputs, lr, epsilon,
                                           data_strategy, gc, dc):
        param = inputs[0]
        lr = np.array([lr], dtype=np.float32)

        momentum = data_strategy.draw(
            hu.tensor1d(min_len=param.shape[0],
                        max_len=param.shape[0],
                        elements=hu.elements_of_type(dtype=np.float32)))
        momentum = np.abs(momentum)

        grad = np.empty(shape=(0, ) + param.shape[1:], dtype=np.float32)
        indices = np.empty(shape=(0, ), dtype=np.int64)

        hypothesis.note('indices.shape: %s' % str(indices.shape))

        op = core.CreateOperator(
            "RowWiseSparseAdagrad",
            ["param", "momentum", "indices", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc)

        def ref_row_wise_sparse(param, momentum, indices, grad, lr):
            param_out = np.copy(param)
            momentum_out = np.copy(momentum)
            return (param_out, momentum_out)

        self.assertReferenceChecks(gc, op,
                                   [param, momentum, indices, grad, lr],
                                   ref_row_wise_sparse)
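
A standalone NumPy sketch of the dense Adagrad math mirrored by ref_adagrad above, following the additive-update convention used in these reference implementations:

import numpy as np

param = np.array([1.0, 2.0], dtype=np.float32)
mom = np.zeros_like(param)  # running sum of squared gradients
grad = np.array([0.5, -0.5], dtype=np.float32)
lr, epsilon = 0.1, 0.01

mom_out = mom + np.square(grad)
param_out = param + lr * grad / (np.sqrt(mom_out) + epsilon)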
Example #36
def test_unique_indexes_of_small_values(ix):
    assert len(ix) <= 2
    assert len(set(ix)) == len(ix)


@given(pdst.indexes(dtype=bool, min_size=2, unique=True))
def test_unique_indexes_of_many_small_values(ix):
    assert len(ix) == 2
    assert len(set(ix)) == len(ix)


# Sizes that fit into an int64 without overflow
range_sizes = st.integers(0, 2**63 - 1)


@given(range_sizes, range_sizes | st.none(), st.data())
def test_arbitrary_range_index(i, j, data):
    if j is not None:
        i, j = sorted((i, j))
    data.draw(pdst.range_indexes(i, j))


@given(pdst.range_indexes())
def test_basic_range_indexes(ix):
    assert isinstance(ix, pandas.RangeIndex)


@settings(suppress_health_check=[HealthCheck.too_slow])
@given(st.data())
def test_generate_arbitrary_indices(data):
    min_size = data.draw(st.integers(0, 10), "min_size")
class StochasticProcessParamsAreVarsTest(tfp_test_util.TestCase):
    @parameterized.named_parameters({
        'testcase_name': sname,
        'process_name': sname
    } for sname in PARAM_EVENT_NDIMS_BY_PROCESS_NAME)
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings(default_max_examples=10,
                             suppress_health_check=[
                                 hp.HealthCheck.too_slow,
                                 hp.HealthCheck.filter_too_much,
                                 hp.HealthCheck.data_too_large
                             ])
    def testProcess(self, process_name, data):
        if tf.executing_eagerly() != (FLAGS.tf_mode == 'eager'):
            return
        seed = tfp_test_util.test_seed()
        process = data.draw(
            stochastic_processes(process_name=process_name, enable_vars=True))
        self.evaluate([var.initializer for var in process.variables])

        # Check that the process passes Variables through to the accessor
        # properties (without converting them to Tensor or anything like that).
        for k, v in six.iteritems(process.parameters):
            if not tensor_util.is_ref(v):
                continue
            self.assertIs(getattr(process, k), v)

        # Check that standard statistics do not read process parameters more
        # than twice (once in the stat itself and up to once in any validation
        # assertions).
        for stat in ['mean', 'covariance', 'stddev', 'variance']:
            hp.note('Testing excessive var usage in {}.{}'.format(
                process_name, stat))
            try:
                with tfp_hps.assert_no_excessive_var_usage(
                        'statistic `{}` of `{}`'.format(stat, process),
                        max_permissible=excessive_usage_count(process_name)):
                    getattr(process, stat)()

            except NotImplementedError:
                pass

        # Check that `sample` doesn't read process parameters more than twice,
        # and that it produces non-None gradients (if the process is fully
        # reparameterized).
        with tf.GradientTape() as tape:
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `sample` of `{}`'.format(process),
                    max_permissible=excessive_usage_count(process_name)):
                sample = process.sample(seed=seed)
        if process.reparameterization_type == tfd.FULLY_REPARAMETERIZED:
            grads = tape.gradient(sample, process.variables)
            for grad, var in zip(grads, process.variables):
                var_name = var.name.rstrip('_0123456789:')
                if grad is None:
                    raise AssertionError(
                        'Missing sample -> {} grad for process {}'.format(
                            var_name, process_name))

        # Test that log_prob produces non-None gradients.
        with tf.GradientTape() as tape:
            lp = process.log_prob(tf.stop_gradient(sample))
        grads = tape.gradient(lp, process.variables)
        for grad, var in zip(grads, process.variables):
            if grad is None:
                raise AssertionError(
                    'Missing log_prob -> {} grad for process {}'.format(
                        var, process_name))

        # Check that log_prob computations avoid reading process parameters
        # more than once.
        hp.note(
            'Testing excessive var usage in {}.log_prob'.format(process_name))
        try:
            with tfp_hps.assert_no_excessive_var_usage(
                    'evaluative `log_prob` of `{}`'.format(process),
                    max_permissible=excessive_usage_count(process_name)):
                process.log_prob(sample)
        except NotImplementedError:
            pass
def test_errors_when_normal_strategy_functions_are_used(f):
    with raises(InvalidArgument):
        getattr(st.data(), f)(lambda x: 1)
def test_can_generate_really_small_negative_floats(x):
    assume(x < 0)
    assert x <= -REALLY_SMALL_FLOAT


@fails
@given(floats())
@TRY_HARDER
def test_can_find_floats_that_do_not_round_trip_through_strings(x):
    assert float(str(x)) == x


@fails
@given(floats())
@TRY_HARDER
def test_can_find_floats_that_do_not_round_trip_through_reprs(x):
    assert float(repr(x)) == x


finite_floats = floats(allow_infinity=False, allow_nan=False)


@settings(deadline=None)
@given(finite_floats, finite_floats, data())
def test_floats_are_in_range(x, y, data):
    x, y = sorted((x, y))
    assume(x < y)

    t = data.draw(floats(x, y))
    assert x <= t <= y
Example #40
        pdst.indexes(
            min_size=3, max_size=3, dtype=bool
        ).example()


@given(pdst.indexes(dtype=bool, unique=True))
def test_unique_indexes_of_small_values(ix):
    assert len(ix) <= 2
    assert len(set(ix)) == len(ix)


# Sizes that fit into an int64 without overflow
range_sizes = st.integers(0, 2 ** 63 - 1)


@given(range_sizes, range_sizes | st.none(), st.data())
def test_arbitrary_range_index(i, j, data):
    if j is not None:
        i, j = sorted((i, j))
    data.draw(pdst.range_indexes(i, j))


@given(pdst.range_indexes())
def test_basic_range_indexes(ix):
    assert isinstance(ix, pandas.RangeIndex)


@given(st.data())
def test_generate_arbitrary_indices(data):
    min_size = data.draw(st.integers(0, 10), 'min_size')
    max_size = data.draw(
def test_nice_repr():
    assert repr(st.data()) == "data()"

@given(st.floats())
def test_down_means_lesser(x):
    lo = next_down(x)
    if not x > lo:
        assert (math.isnan(x) and math.isnan(lo)) or (x < 0 and math.isinf(x))


@given(st.floats(allow_nan=False, allow_infinity=False))
def test_updown_roundtrip(val):
    assert val == next_up(next_down(val))
    assert val == next_down(next_up(val))


@given(st.data(), st.floats(allow_nan=False, allow_infinity=False))
def test_floats_in_tiny_interval_within_bounds(data, center):
    assume(not (math.isinf(next_down(center)) or math.isinf(next_up(center))))
    lo = Decimal.from_float(next_down(center)).next_plus()
    hi = Decimal.from_float(next_up(center)).next_minus()
    assert float(lo) < lo < center < hi < float(hi)
    val = data.draw(st.floats(lo, hi))
    assert lo < val < hi


def test_float_free_interval_is_invalid():
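    # Above 2 ** 53 adjacent floats are spaced at least 2 apart, and above
    # 2 ** 54 they are 4 apart, so no float lies strictly between 2 ** 54
    # and 2 ** 54 + 4; [lo .. hi] below falls entirely inside that gap.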
    lo = (2 ** 54) + 1
    hi = lo + 2
    assert float(lo) < lo < hi < float(hi), 'There are no floats in [lo .. hi]'
    with pytest.raises(InvalidArgument):
        st.floats(lo, hi).example()
        except KeyError:
            pass
        model[k] = v
        target[k] = v
        target.check_valid()
        assert target[k] == v
        for r, s in model.items():
            try:
                assert s == target[r]
            except KeyError:
                pass
        assert len(target) <= min(len(model), size)


@settings(suppress_health_check=[HealthCheck.too_slow], deadline=None)
@given(write_pattern(min_size=2), st.data())
def test_always_evicts_the_lowest_scoring_value(writes, data):
    scores = {}

    n_keys = len({k for k, _ in writes})

    assume(n_keys > 1)

    size = data.draw(st.integers(1, n_keys - 1))

    evicted = set()

    def new_score(key):
        scores[key] = data.draw(
            st.integers(0, 1000), label='scores[%r]' % (key,))
        return scores[key]
Example #44
    json.loads(string)


@pytest.mark.parametrize(
    "start, type_",
    [
        ("dict", dict),
        ("list", list),
        ("STRING", text_type),
        ("NUMBER", integer_types + (float, )),
        ("TRUE", bool),
        ("FALSE", bool),
        ("NULL", type(None)),
    ],
)
@given(data=data())
def test_can_specify_start_rule(data, start, type_):
    string = data.draw(
        from_lark(Lark(EBNF_GRAMMAR, start="value"), start=start))
    value = json.loads(string)
    assert isinstance(value, type_)


def test_can_generate_ignored_tokens():
    list_grammar = r"""
    list : "[" [STRING ("," STRING)*] "]"
    STRING : /"[a-z]*"/
    WS : /[ \t\r\n]+/
    %ignore WS
    """
    strategy = from_lark(Lark(list_grammar, start="list"))
def test_errors_when_used_in_find():
    with raises(InvalidArgument):
        find(st.data(), lambda x: x.draw(st.booleans()))
class ParameterBijectorsTest(test_util.TestCase):

  @hp.given(hps.data())
  @tfp_hps.tfp_hp_settings()
  def testCanConstructAndSampleDistribution(self, data):

    # TODO(b/169874884): Implement `width` parameters to work around the need
    # for a high > low` joint constraint.
    high_gt_low_constraint_dists = ('Bates', 'PERT', 'Triangular',
                                    'TruncatedCauchy', 'TruncatedNormal',
                                    'Uniform')
    not_annotated_dists = ('Empirical|event_ndims=0', 'Empirical|event_ndims=1',
                           'Empirical|event_ndims=2', 'FiniteDiscrete',
                           'MultivariateStudentTLinearOperator',
                           'PoissonLogNormalQuadratureCompound',
                           'SphericalUniform', 'SinhArcsinh',
                           'StoppingRatioLogistic',)
    non_trainable_dists = (
        high_gt_low_constraint_dists + not_annotated_dists +
        dhps.INSTANTIABLE_META_DISTS)

    non_trainable_tensor_params = (
        'atol',
        'rtol',
        'eigenvectors',  # TODO(b/171872834): DeterminantalPointProcess
        'total_count',
        'num_samples',
        'df',  # Can't represent constraint that Wishart df > dimension.
        'mean_direction')  # TODO(b/118492439): Add `UnitVector` bijector.
    non_trainable_non_tensor_params = ('dimension', 'dtype')  # Required by Zipf.

    dist = data.draw(
        dhps.distributions(
            eligibility_filter=(lambda name: name not in non_trainable_dists)))
    sample_shape = tuple(
        self.evaluate(
            tf.concat([dist.batch_shape_tensor(),
                       dist.event_shape_tensor()],
                      axis=0)))

    params = type(dist).parameter_properties(num_classes=2)
    params64 = type(dist).parameter_properties(dtype=tf.float64, num_classes=2)

    new_parameters = {}
    seeds = {k: s for (k, s) in zip(
        params.keys(),
        samplers.split_seed(test_util.test_seed(), n=len(params)))}
    for param_name, param in params.items():
      if param_name in non_trainable_tensor_params:
        new_parameters[param_name] = dist.parameters[param_name]
      elif param.is_preferred:
        b = param.default_constraining_bijector_fn()
        unconstrained_shape = (
            b.inverse_event_shape_tensor(
                param.shape_fn(sample_shape=sample_shape)))
        unconstrained_param = samplers.normal(
            unconstrained_shape, seed=seeds[param_name])
        new_parameters[param_name] = b.forward(unconstrained_param)

        # Check that passing a float64 `eps` works with float64 parameters.
        b_float64 = params64[param_name].default_constraining_bijector_fn()
        b_float64(tf.cast(unconstrained_param, tf.float64))

    # Copy over any non-Tensor parameters.
    new_parameters.update({
        k: v
        for (k, v) in dist.parameters.items()
        if k in non_trainable_non_tensor_params
    })

    # Sanity check that we got valid parameters.
    new_parameters['validate_args'] = True
    new_dist = type(dist)(**new_parameters)
    x = self.evaluate(new_dist.sample(seed=test_util.test_seed()))
    self.assertEqual(sample_shape, x.shape)

    # Valid parameters should give non-nan samples.
    self.assertAllFalse(np.isnan(x))
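
A minimal sketch, assuming TensorFlow Probability, of the core move in this test: push unconstrained reals through a parameter's default constraining bijector to obtain a valid parameter value (tfd.Normal and its 'scale' property are used purely as an illustration).

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

props = tfd.Normal.parameter_properties()
scale_bijector = props['scale'].default_constraining_bijector_fn()

unconstrained = tf.random.normal([3])
scale = scale_bijector.forward(unconstrained)  # strictly positive values
dist = tfd.Normal(loc=0., scale=scale, validate_args=True)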
def test_errors_when_asked_for_example():
    with raises(InvalidArgument):
        st.data().example()
Example #48
    if isinstance(v, type) and v.__name__ != "BuiltinImporter")
generics = sorted(
    (
        t for t in types._global_type_lookup
        # We ignore TypeVar, because it is not a Generic type:
        if isinstance(t, types.typing_root_type) and t != typing.TypeVar),
    key=str,
)


@pytest.mark.parametrize("typ", generics, ids=repr)
@settings(
    suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much],
    database=None,
)
@given(data=st.data())
def test_resolve_typing_module(data, typ):
    ex = data.draw(from_type(typ))

    if typ in (typing.BinaryIO, typing.TextIO):
        assert isinstance(ex, io.IOBase)
    elif isinstance(typ, typing._ProtocolMeta):
        pass
    elif typ is typing.Type and not isinstance(typing.Type, type):
        assert ex is type or isinstance(ex, typing.TypeVar)
    else:
        assert isinstance(ex, typ)


@pytest.mark.parametrize("typ", [typing.Any, typing.Union])
def test_does_not_resolve_special_cases(typ):
    with pytest.raises(InvalidArgument):
        from_type(typ).example()


# A variety of strategies that all generate the integers 1-20 inclusive, but
# may differ in their support for special-case filtering (see the sketch
# after the list).
one_to_twenty_strategies = [
    st.integers(1, 20),
    st.integers(0, 19).map(lambda x: x + 1),
    st.sampled_from(hrange(1, 21)),
    st.sampled_from(hrange(0, 20)).map(lambda x: x + 1),
]
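
# Filtering in action: Hypothesis shrinks toward the smallest value a
# strategy's support allows, so a filtered-out bound is observable with
# `find`. A minimal sketch using only public APIs:
from hypothesis import find
import hypothesis.strategies as st

smallest = find(st.integers(1, 20).filter(lambda x: x != 1), lambda x: True)
assert smallest == 2  # 1 is filtered out, so the minimal example is 2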


@pytest.mark.parametrize("base", one_to_twenty_strategies)
@given(
    data=st.data(),
    forbidden_values=st.lists(st.integers(1, 20), max_size=19, unique=True),
)
def test_chained_filters_agree(data, forbidden_values, base):
    def forbid(s, forbidden):
        """Helper function to avoid Python variable scoping issues."""
        return s.filter(lambda x: x != forbidden)

    s = base
    for forbidden in forbidden_values:
        s = forbid(s, forbidden)

    x = data.draw(s)
    assert 1 <= x <= 20
    assert x not in forbidden_values
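
# Why the `forbid` helper above is needed: Python closures capture variables,
# not values, so building the filters in a bare loop would leave every lambda
# reading the *last* forbidden value. A self-contained demonstration:
fns = [lambda: i for i in range(3)]
assert [f() for f in fns] == [2, 2, 2]   # late binding: all share the final i

fns = [lambda i=i: i for i in range(3)]  # default argument binds the value now
assert [f() for f in fns] == [0, 1, 2]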
Example #50
class DistributionParamsAreVarsTest(parameterized.TestCase, tf.test.TestCase):
    @parameterized.parameters((dname, ) for dname in TF2_FRIENDLY_DISTS)
    @hp.given(hps.data())
    @hp.settings(deadline=None,
                 max_examples=hypothesis_max_examples(),
                 suppress_health_check=[hp.HealthCheck.too_slow],
                 derandomize=tfp_test_util.derandomize_hypothesis())
    def testDistribution(self, dist_name, data):
        if tf.executing_eagerly() != (FLAGS.tf_mode == 'eager'):
            return
        tf.compat.v1.set_random_seed(
            data.draw(
                hpnp.arrays(dtype=np.int64,
                            shape=[]).filter(lambda x: x != 0)))
        dist, batch_shape = data.draw(
            distributions(dist_name=dist_name, enable_vars=True))
        del batch_shape
        logging.info(
            'distribution: %s; parameters used: %s', dist,
            [k for k, v in six.iteritems(dist.parameters) if v is not None])
        self.evaluate([var.initializer for var in dist.variables])
        for k, v in six.iteritems(dist.parameters):
            if not tensor_util.is_mutable(v):
                continue
            try:
                self.assertIs(getattr(dist, k), v)
            except AssertionError as e:
                raise AssertionError(
                    'No attr found for parameter {} of distribution {}: \n{}'.
                    format(k, dist_name, e))

        for stat in data.draw(
                hps.permutations([
                    'covariance', 'entropy', 'mean', 'mode', 'stddev',
                    'variance'
                ]))[:3]:
            logging.info('%s.%s', dist_name, stat)
            try:
                VAR_USAGES.clear()
                getattr(dist, stat)()
                assert_no_excessive_var_usage('statistic `{}` of `{}`'.format(
                    stat, dist))
            except NotImplementedError:
                pass

        VAR_USAGES.clear()
        with tf.GradientTape() as tape:
            sample = dist.sample()
        assert_no_excessive_var_usage('method `sample` of `{}`'.format(dist))
        if dist.reparameterization_type == tfd.FULLY_REPARAMETERIZED:
            grads = tape.gradient(sample, dist.variables)
            for grad, var in zip(grads, dist.variables):
                if grad is None:
                    raise AssertionError(
                        'Missing sample -> {} grad for distribution {}'.format(
                            var, dist_name))

        # Turn off validations, since log_prob can choke on dist's own samples.
        dist = dist.copy(validate_args=False)
        if dist_name not in NO_LOG_PROB_PARAM_GRADS:
            with tf.GradientTape() as tape:
                lp = dist.log_prob(tf.stop_gradient(sample))
            grads = tape.gradient(lp, dist.variables)
            for grad, var in zip(grads, dist.variables):
                if grad is None:
                    raise AssertionError(
                        'Missing log_prob -> {} grad for distribution {}'.
                        format(var, dist_name))

        for evaluative in data.draw(
                hps.permutations([
                    'log_prob', 'prob', 'log_cdf', 'cdf',
                    'log_survival_function', 'survival_function'
                ]))[:3]:
            logging.info('%s.%s', dist_name, evaluative)
            try:
                VAR_USAGES.clear()
                getattr(dist, evaluative)(sample)
                assert_no_excessive_var_usage(
                    'evaluative `{}` of `{}`'.format(evaluative, dist),
                    max_permissible=1)  # No validation => 1 convert.
            except NotImplementedError:
                pass
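
# The sample-gradient check above, reduced to a single concrete distribution.
# A minimal sketch with tfd.Normal, which is fully reparameterized (assumes
# TF2 eager execution):
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

loc, scale = tf.Variable(0.), tf.Variable(1.)
with tf.GradientTape() as tape:
    sample = tfd.Normal(loc=loc, scale=scale).sample(seed=42)
grads = tape.gradient(sample, [loc, scale])
assert all(g is not None for g in grads)  # reparameterized => usable gradients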
Example #51
    p = mocker.patch(mock_print)
    sort_and_print_entries(entries, Args(*options))
    e = [mocker.call(entries[i]) for i in order]
    p.assert_has_calls(e)


# Each test has an "example" version for demonstrative purposes,
# and a test that uses the hypothesis module.


def test_range_check_returns_range_as_is_but_with_floats_example():
    assert range_check(10, 11) == (10.0, 11.0)
    assert range_check(6.4, 30) == (6.4, 30.0)


@given(x=floats(allow_nan=False, min_value=-1E8, max_value=1E8) | integers(), d=data())
def test_range_check_returns_range_as_is_if_first_is_less_than_second(x, d):
    # Pull data such that the first is less than the second.
    if isinstance(x, float):
        y = d.draw(floats(min_value=x + 1.0, max_value=1E9, allow_nan=False))
    else:
        y = d.draw(integers(min_value=x + 1))
    assert range_check(x, y) == (x, y)


def test_range_check_raises_value_error_if_second_is_less_than_first_example():
    with pytest.raises(ValueError, match="low >= high"):
        range_check(7, 2)


@given(x=floats(allow_nan=False), d=data())
def test_range_check_raises_value_error_if_second_is_less_than_first(x, d):
    # Pull data such that the second is less than the first.
    y = d.draw(floats(max_value=x, allow_nan=False).filter(lambda y: y < x))
    with pytest.raises(ValueError, match="low >= high"):
        range_check(x, y)
Example #52
class DistributionSlicingTest(tf.test.TestCase):
    def _test_slicing(self, data, dist, batch_shape):
        slices = data.draw(valid_slices(batch_shape))
        slice_str = 'dist[{}]'.format(', '.join(stringify_slices(slices)))
        logging.info('slice used: %s', slice_str)
        # Make sure the slice string appears in Hypothesis' attempted example log,
        # by drawing and discarding it.
        data.draw(hps.just(slice_str))
        if not slices:  # Nothing further to check.
            return
        sliced_zeros = np.zeros(batch_shape)[slices]
        sliced_dist = dist[slices]
        self.assertAllEqual(sliced_zeros.shape, sliced_dist.batch_shape)

        try:
            seed = data.draw(
                hpnp.arrays(dtype=np.int64, shape=[]).filter(lambda x: x != 0))
            samples = self.evaluate(dist.sample(seed=maybe_seed(seed)))

            if not sliced_zeros.size:
                # TODO(b/128924708): Fix distributions that fail on degenerate empty
                #     shapes, e.g. Multinomial, DirichletMultinomial, ...
                return

            sliced_samples = self.evaluate(
                sliced_dist.sample(seed=maybe_seed(seed)))
        except NotImplementedError as e:
            raise
        except tf.errors.UnimplementedError as e:
            if 'Unhandled input dimensions' in str(e) or 'rank not in' in str(
                    e):
                # Some cases can fail with 'Unhandled input dimensions \d+' or
                # 'inputs rank not in [0,6]: \d+'
                return
            raise

        # Come up with the slices for samples (which must also include event dims).
        sample_slices = (tuple(slices) if isinstance(
            slices, collections.Sequence) else (slices, ))
        if Ellipsis not in sample_slices:
            sample_slices += (Ellipsis, )
        sample_slices += tuple([slice(None)] *
                               tensorshape_util.rank(dist.event_shape))

        # Report sub-sliced samples (on which we compare log_prob) to hypothesis.
        data.draw(hps.just(samples[sample_slices]))
        self.assertAllEqual(samples[sample_slices].shape, sliced_samples.shape)
        try:
            try:
                lp = self.evaluate(dist.log_prob(samples))
            except tf.errors.InvalidArgumentError:
                # TODO(b/129271256): d.log_prob(d.sample()) should not fail
                #     validate_args checks.
                # We only tolerate this case for the non-sliced dist.
                return
            sliced_lp = self.evaluate(
                sliced_dist.log_prob(samples[sample_slices]))
        except tf.errors.UnimplementedError as e:
            if 'Unhandled input dimensions' in str(e) or 'rank not in' in str(
                    e):
                # Some cases can fail with 'Unhandled input dimensions \d+' or
                # 'inputs rank not in [0,6]: \d+'
                return
            raise
        # TODO(b/128708201): Better numerics for Geometric/Beta?
        # Eigen can return quite different results for packet vs non-packet ops.
        # To work around this, we use a much larger rtol for the last 3
        # (assuming packet size 4) elements.
        packetized_lp = lp[slices].reshape(-1)[:-3]
        packetized_sliced_lp = sliced_lp.reshape(-1)[:-3]
        rtol = (0.1 if any(x in dist.name for x in ('Geometric', 'Beta',
                                                    'Dirichlet')) else 0.02)
        self.assertAllClose(packetized_lp, packetized_sliced_lp, rtol=rtol)
        possibly_nonpacket_lp = lp[slices].reshape(-1)[-3:]
        possibly_nonpacket_sliced_lp = sliced_lp.reshape(-1)[-3:]
        rtol = 0.4
        self.assertAllClose(possibly_nonpacket_lp,
                            possibly_nonpacket_sliced_lp,
                            rtol=rtol)

    def _run_test(self, data):
        tf.compat.v1.set_random_seed(
            data.draw(
                hpnp.arrays(dtype=np.int64,
                            shape=[]).filter(lambda x: x != 0)))
        dist, batch_shape = data.draw(distributions())
        logging.info(
            'distribution: %s; parameters used: %s', dist,
            [k for k, v in six.iteritems(dist.parameters) if v is not None])
        self.assertAllEqual(batch_shape, dist.batch_shape)

        with self.assertRaisesRegexp(TypeError, 'not iterable'):
            iter(dist)  # __getitem__ magically makes an object iterable.

        self._test_slicing(data, dist, batch_shape)

        # TODO(bjp): Enable sampling and log_prob checks. Currently, too many errors
        #     from out-of-domain samples.
        # self.evaluate(dist.log_prob(dist.sample()))

    @hp.given(hps.data())
    @hp.settings(deadline=None,
                 max_examples=hypothesis_max_examples(),
                 suppress_health_check=[hp.HealthCheck.too_slow],
                 derandomize=tfp_test_util.derandomize_hypothesis())
    def testDistributions(self, data):
        if tf.executing_eagerly() != (FLAGS.tf_mode == 'eager'): return
        self._run_test(data)
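
# The draw-and-discard trick used in _test_slicing stands on its own: drawing
# hps.just(value) changes nothing about the test, but makes `value` show up
# in the failing example Hypothesis reports. A self-contained sketch (the
# test name is illustrative):
import hypothesis.strategies as hps
from hypothesis import given

@given(hps.data(), hps.integers())
def test_reporting_sketch(data, x):
    derived = x * x               # a computed value we want visible on failure
    data.draw(hps.just(derived))  # no-op draw, purely for reporting
    assert derived < 10           # a failure prints both x and derived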
Example #53
# Run for a while - arrays are a bigger search space than usual
settings.register_profile("ci", deadline=None)
settings.load_profile("ci")


an_array = npst.arrays(
    dtype=st.one_of(
        npst.unsigned_integer_dtypes(),
        npst.integer_dtypes(),
        npst.floating_dtypes(),
    ),
    shape=npst.array_shapes(max_side=3),  # max_side specified for performance
)


@given(st.data(), an_array)
def test_CFMask_coder_roundtrip(data, arr):
    names = data.draw(st.lists(st.text(), min_size=arr.ndim,
                               max_size=arr.ndim, unique=True).map(tuple))
    original = xr.Variable(names, arr)
    coder = xr.coding.variables.CFMaskCoder()
    roundtripped = coder.decode(coder.encode(original))
    xr.testing.assert_identical(original, roundtripped)


@given(st.data(), an_array)
def test_CFScaleOffset_coder_roundtrip(data, arr):
    names = data.draw(st.lists(st.text(), min_size=arr.ndim,
                               max_size=arr.ndim, unique=True).map(tuple))
    original = xr.Variable(names, arr)
    coder = xr.coding.variables.CFScaleOffsetCoder()
    roundtripped = coder.decode(coder.encode(original))
    xr.testing.assert_identical(original, roundtripped)
class _GradTest(test_util.TestCase):
    def _make_distribution(self,
                           dist_name,
                           params,
                           batch_shape,
                           override_params=None):
        override_params = override_params or {}
        all_params = dict(params)
        for param_name, override_param in override_params.items():
            all_params[param_name] = override_param
        all_params = dhps.constrain_params(all_params, dist_name)
        all_params = dhps.modify_params(all_params,
                                        dist_name,
                                        validate_args=False)
        return dhps.base_distributions(enable_vars=False,
                                       dist_name=dist_name,
                                       params=all_params,
                                       batch_shape=batch_shape,
                                       validate_args=False)

    def _param_func_generator(self,
                              data,
                              dist_name,
                              params,
                              batch_shape,
                              func,
                              generate_sample_function=False):
        for param_name, param in params.items():
            if (not tf.is_tensor(param)
                    or not np.issubdtype(param.dtype, np.floating)):
                continue

            def _dist_func(param_name, param):
                return data.draw(
                    self._make_distribution(
                        dist_name,
                        params,
                        batch_shape,
                        override_params={param_name: param}))

            def _func(param_name, param):
                return func(_dist_func(param_name, param))

            yield param_name, param, _dist_func, _func

    @test_base_distributions
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
    def testSample(self, dist_name, data):
        if (dist_name in self.sample_blocklist) != FLAGS.blocklists_only:
            self.skipTest('Distribution currently broken.')

        def _sample(dist):
            return dist.sample(seed=random.PRNGKey(0))

        params_unconstrained, batch_shape = data.draw(
            dhps.base_distribution_unconstrained_params(enable_vars=False,
                                                        dist_name=dist_name))

        for (param_name, unconstrained_param, dist_func,
             func) in self._param_func_generator(data, dist_name,
                                                 params_unconstrained,
                                                 batch_shape, _sample):
            dist = dist_func(param_name, unconstrained_param)
            if (dist.reparameterization_type !=
                    reparameterization.FULLY_REPARAMETERIZED):
                # Skip distributions that don't support differentiable sampling.
                self.skipTest('{} is not reparameterized.'.format(dist_name))
            self._test_transformation(functools.partial(func, param_name),
                                      unconstrained_param,
                                      msg=param_name)

    @test_base_distributions
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
    def testLogProbParam(self, dist_name, data):
        if (dist_name
                in self.logprob_param_blocklist) != FLAGS.blocklists_only:
            self.skipTest('Distribution currently broken.')

        params, batch_shape = data.draw(
            dhps.base_distribution_unconstrained_params(enable_vars=False,
                                                        dist_name=dist_name))
        constrained_params = dhps.constrain_params(params, dist_name)

        sampling_dist = data.draw(
            dhps.base_distributions(batch_shape=batch_shape,
                                    enable_vars=False,
                                    dist_name=dist_name,
                                    params=constrained_params))
        sample = sampling_dist.sample(seed=random.PRNGKey(0))

        def _log_prob(dist):
            return dist.log_prob(sample)

        for param_name, param, dist_func, func in self._param_func_generator(
                data, dist_name, params, batch_shape, _log_prob):
            del dist_func
            self._test_transformation(functools.partial(func, param_name),
                                      param,
                                      msg=param_name)

    @test_base_distributions
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings(default_max_examples=DEFAULT_MAX_EXAMPLES)
    def testLogProbSample(self, dist_name, data):
        if (dist_name
                in self.logprob_sample_blocklist) != FLAGS.blocklists_only:
            self.skipTest('Distribution currently broken.')

        params, batch_shape = data.draw(
            dhps.base_distribution_unconstrained_params(enable_vars=False,
                                                        dist_name=dist_name))
        constrained_params = dhps.constrain_params(params, dist_name)

        dist = data.draw(
            dhps.base_distributions(batch_shape=batch_shape,
                                    enable_vars=False,
                                    dist_name=dist_name,
                                    params=constrained_params))

        sample = dist.sample(seed=random.PRNGKey(0))
        if np.issubdtype(sample.dtype, np.integer):
            self.skipTest(
                '{} has integer samples; no derivative.'.format(dist_name))

        def _log_prob(sample):
            return dist.log_prob(sample)

        self._test_transformation(_log_prob, sample)
Example #55
def _vec_float_binary(test_func, func, type):
    return pytest.mark.parametrize('func,type', [
        (func, type)
    ])(given(data=st.data())(test_func))
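
# Usage sketch for the wrapper above (the test body and the operator.add
# choice are illustrative assumptions): parametrize supplies func/type,
# given supplies data, so the wrapped test receives all three.
import operator

def _check_binary(func, type, data):
    x = data.draw(st.floats(-10, 10))
    y = data.draw(st.floats(-10, 10))
    assert isinstance(func(x, y), type)

test_add_returns_float = _vec_float_binary(_check_binary, operator.add, float)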
Example #56
def test_will_error_on_find():
    d = ConjectureData.for_buffer(bytes(0))
    d.is_find = True
    with pytest.raises(InvalidArgument):
        d.draw(st.data())


def test_minimal_quadrant1():
    assert minimal(complex_numbers(), lambda x: x.imag > 0 and x.real > 0) == 1 + 1j


def test_minimal_quadrant2():
    assert minimal(complex_numbers(), lambda x: x.imag > 0 and x.real < 0) == -1 + 1j


def test_minimal_quadrant3():
    assert minimal(complex_numbers(), lambda x: x.imag < 0 and x.real < 0) == -1 - 1j


def test_minimal_quadrant4():
    assert minimal(complex_numbers(), lambda x: x.imag < 0 and x.real > 0) == 1 - 1j


@given(st.data(), st.integers(-5, 5).map(lambda x: 10 ** x))
def test_max_magnitude_respected(data, mag):
    c = data.draw(complex_numbers(max_magnitude=mag))
    assert abs(c) <= mag * (1 + sys.float_info.epsilon)


@given(complex_numbers(max_magnitude=0))
def test_max_magnitude_zero(val):
    assert val == 0


@given(st.data(), st.integers(-5, 5).map(lambda x: 10 ** x))
def test_min_magnitude_respected(data, mag):
    c = data.draw(complex_numbers(min_magnitude=mag))
    assert (
        abs(c.real) >= mag
        or abs(c.imag) >= mag
        or abs(c) >= mag * (1 - sys.float_info.epsilon)
    )
Example #58
from mygrad.tensor_base import Tensor
from mygrad.math import multiply, multiply_sequence

import hypothesis.strategies as st
from hypothesis import given
import hypothesis.extra.numpy as hnp

import numpy as np


@given(st.data())
def test_multiply_fwd(data):
    a = data.draw(hnp.arrays(shape=hnp.array_shapes(max_side=3, max_dims=3),
                             dtype=float,
                             elements=st.floats(-100, 100)))
    b = data.draw(hnp.arrays(shape=a.shape,
                             dtype=float,
                             elements=st.floats(-100, 100)))
    a = Tensor(a)  # leave b as a plain ndarray to exercise Tensor/ndarray mixing

    result = a.data * b
    assert np.allclose((a * b).data, result)
    assert np.allclose((b * a).data, result)
    assert np.allclose(multiply(a, b).data, result)
    assert np.allclose(multiply(b, a).data, result)
    assert np.allclose(multiply_sequence(a, b).data, result)
    assert np.allclose(multiply_sequence(b, a).data, result)
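
# To also exercise broadcasting, the second operand can be drawn with a
# broadcast-compatible rather than identical shape. A sketch relying on
# hnp.broadcastable_shapes (available in reasonably recent Hypothesis
# releases; the test name is illustrative):
@given(st.data())
def test_multiply_broadcast_sketch(data):
    a = data.draw(hnp.arrays(shape=hnp.array_shapes(max_side=3, max_dims=3),
                             dtype=float, elements=st.floats(-100, 100)))
    b = data.draw(hnp.arrays(shape=hnp.broadcastable_shapes(a.shape),
                             dtype=float, elements=st.floats(-100, 100)))
    assert np.allclose((Tensor(a) * b).data, a * b)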


import pytest

import hypothesis.strategies as st
from hypothesis import Verbosity, HealthCheck, find, given, reject, \
    settings, unlimited
from hypothesis.errors import NoSuchExample
from tests.common.utils import no_shrink


@pytest.mark.parametrize('strat', [st.text(min_size=5)])
@settings(
    phases=no_shrink, deadline=None,
    suppress_health_check=HealthCheck.all()
)
@given(st.data())
def test_explore_arbitrary_function(strat, data):
    cache = {}

    def predicate(x):
        try:
            return cache[x]
        except KeyError:
            return cache.setdefault(x, data.draw(st.booleans(), label=repr(x)))

    try:
        find(
            strat, predicate,
            settings=settings(
                max_examples=10, database=None, timeout=unlimited,
                verbosity=Verbosity.quiet,
            ))
    except NoSuchExample:
        reject()


class DistributionParamsAreVarsTest(test_util.TestCase):
    @parameterized.named_parameters({
        'testcase_name': dname,
        'dist_name': dname
    } for dname in TF2_FRIENDLY_DISTS)
    @hp.given(hps.data())
    @tfp_hps.tfp_hp_settings()
    def testDistribution(self, dist_name, data):
        seed = test_util.test_seed()
        # Explicitly draw event_dim here to avoid relying on _params_event_ndims
        # later, so this test can support distributions that do not implement the
        # slicing protocol.
        event_dim = data.draw(hps.integers(min_value=2, max_value=6))
        dist = data.draw(
            dhps.distributions(dist_name=dist_name,
                               event_dim=event_dim,
                               enable_vars=True))
        batch_shape = dist.batch_shape
        batch_shape2 = data.draw(
            tfp_hps.broadcast_compatible_shape(batch_shape))
        dist2 = data.draw(
            dhps.distributions(dist_name=dist_name,
                               batch_shape=batch_shape2,
                               event_dim=event_dim,
                               enable_vars=True))
        self.evaluate([var.initializer for var in dist.variables])

        # Check that the distribution passes Variables through to the accessor
        # properties (without converting them to Tensor or anything like that).
        for k, v in six.iteritems(dist.parameters):
            if not tensor_util.is_ref(v):
                continue
            self.assertIs(getattr(dist, k), v)

        # Check that standard statistics do not read distribution parameters more
        # than twice (once in the stat itself and up to once in any validation
        # assertions).
        max_permissible = 2 + extra_tensor_conversions_allowed(dist)
        for stat in sorted(
                data.draw(
                    hps.sets(hps.one_of(
                        map(hps.just, [
                            'covariance', 'entropy', 'mean', 'mode', 'stddev',
                            'variance'
                        ])),
                             min_size=3,
                             max_size=3))):
            hp.note('Testing excessive var usage in {}.{}'.format(
                dist_name, stat))
            try:
                with tfp_hps.assert_no_excessive_var_usage(
                        'statistic `{}` of `{}`'.format(stat, dist),
                        max_permissible=max_permissible):
                    getattr(dist, stat)()

            except NotImplementedError:
                pass

        # Check that `sample` doesn't read distribution parameters more than twice,
        # and that it produces non-None gradients (if the distribution is fully
        # reparameterized).
        with tf.GradientTape() as tape:
            # TDs do bijector assertions twice (once by distribution.sample, and once
            # by bijector.forward).
            max_permissible = 2 + extra_tensor_conversions_allowed(dist)
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `sample` of `{}`'.format(dist),
                    max_permissible=max_permissible):
                sample = dist.sample(seed=seed)
        if dist.reparameterization_type == tfd.FULLY_REPARAMETERIZED:
            grads = tape.gradient(sample, dist.variables)
            for grad, var in zip(grads, dist.variables):
                var_name = var.name.rstrip('_0123456789:')
                if var_name in NO_SAMPLE_PARAM_GRADS.get(dist_name, ()):
                    continue
                if grad is None:
                    raise AssertionError(
                        'Missing sample -> {} grad for distribution {}'.format(
                            var_name, dist_name))

        # Turn off validations, since TODO(b/129271256) log_prob can choke on dist's
        # own samples.  Also, to relax conversion counts for KL (might do >2 w/
        # validate_args).
        dist = dist.copy(validate_args=False)
        dist2 = dist2.copy(validate_args=False)

        # Test that KL divergence reads distribution parameters at most once,
        # and that it produces non-None gradients.
        try:
            for d1, d2 in (dist, dist2), (dist2, dist):
                with tf.GradientTape() as tape:
                    with tfp_hps.assert_no_excessive_var_usage(
                            '`kl_divergence` of (`{}` (vars {}), `{}` (vars {}))'
                            .format(d1, d1.variables, d2, d2.variables),
                            max_permissible=1
                    ):  # No validation => 1 convert per var.
                        kl = d1.kl_divergence(d2)
                wrt_vars = list(d1.variables) + list(d2.variables)
                grads = tape.gradient(kl, wrt_vars)
                for grad, var in zip(grads, wrt_vars):
                    if grad is None and dist_name not in NO_KL_PARAM_GRADS:
                        raise AssertionError(
                            'Missing KL({} || {}) -> {} grad:\n'
                            '{} vars: {}\n{} vars: {}'.format(
                                d1, d2, var, d1, d1.variables, d2,
                                d2.variables))
        except NotImplementedError:
            pass

        # Test that log_prob produces non-None gradients, except for distributions
        # on the NO_LOG_PROB_PARAM_GRADS blacklist.
        if dist_name not in NO_LOG_PROB_PARAM_GRADS:
            with tf.GradientTape() as tape:
                lp = dist.log_prob(tf.stop_gradient(sample))
            grads = tape.gradient(lp, dist.variables)
            for grad, var in zip(grads, dist.variables):
                if grad is None:
                    raise AssertionError(
                        'Missing log_prob -> {} grad for distribution {}'.
                        format(var, dist_name))

        # Test that all forms of probability evaluation avoid reading distribution
        # parameters more than once.
        for evaluative in sorted(
                data.draw(
                    hps.sets(hps.one_of(
                        map(hps.just, [
                            'log_prob', 'prob', 'log_cdf', 'cdf',
                            'log_survival_function', 'survival_function'
                        ])),
                             min_size=3,
                             max_size=3))):
            hp.note('Testing excessive var usage in {}.{}'.format(
                dist_name, evaluative))
            try:
                # No validation => 1 convert. But for TD we allow 2:
                # dist.log_prob(bijector.inverse(samp)) + bijector.ildj(samp)
                max_permissible = 2 + extra_tensor_conversions_allowed(dist)
                with tfp_hps.assert_no_excessive_var_usage(
                        'evaluative `{}` of `{}`'.format(evaluative, dist),
                        max_permissible=max_permissible):
                    getattr(dist, evaluative)(sample)
            except NotImplementedError:
                pass
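
# tfp_hps.assert_no_excessive_var_usage is, conceptually, a counter on tensor
# conversions triggered by variable reads. A toy sketch of the idea (not
# TFP's actual implementation), monkeypatching tf.convert_to_tensor:
import contextlib
import tensorflow as tf

@contextlib.contextmanager
def count_conversions(counts):
    original = tf.convert_to_tensor
    def counting(value, *args, **kwargs):
        counts[0] += 1
        return original(value, *args, **kwargs)
    tf.convert_to_tensor = counting
    try:
        yield
    finally:
        tf.convert_to_tensor = original

counts = [0]
with count_conversions(counts):
    tf.convert_to_tensor(1.0) + tf.convert_to_tensor(2.0)
assert counts[0] >= 2  # both explicit conversions were observed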