Code Example #1
 def test_candidates(self, pre, post):
     """
     Names containing "settings" are candidates.
     """
     modname = pre + 'settings' + post
     note('modname={!r}'.format(modname))
     self.assertTrue(utils.is_candidate_name(modname))
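Every example on this page exercises Hypothesis's note() helper, which records a message that is replayed alongside the final falsifying example when a test fails. A minimal self-contained sketch of that behavior (the test itself is hypothetical):

from hypothesis import given, note, strategies as st

@given(st.lists(st.integers()))
def test_reversing_twice_is_identity(xs):
    ys = list(reversed(xs))
    note('reversed = {!r}'.format(ys))  # printed only for a failing example
    assert list(reversed(ys)) == xs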
Code Example #2
File: test_scaling.py Project: rot256/crypsis
def test_scaling_search(vals, threads):
    note('Searching with %d threads' % threads)
    good = random.choice(vals)
    def funcer(t):
        return t == good
    res = scaling.search(funcer, vals, threads=threads)
    assert res == good
Code Example #3
File: adagrad_test.py Project: Sissipei/caffe2
    def test_sparse_adagrad_empty(self, inputs, lr, epsilon,
                                  data_strategy, gc, dc):
        param, momentum = inputs
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)

        grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
        indices = np.empty(shape=(0,), dtype=np.int64)

        hypothesis.note('indices.shape: %s' % str(indices.shape))

        op = core.CreateOperator(
            "SparseAdagrad",
            ["param", "momentum", "indices", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc)

        def ref_sparse(param, momentum, indices, grad, lr):
            param_out = np.copy(param)
            momentum_out = np.copy(momentum)
            return (param_out, momentum_out)

        self.assertReferenceChecks(
            gc, op,
            [param, momentum, indices, grad, lr],
            ref_sparse)
Code Example #4
File: test_fornberg.py Project: pbrod/numdifftools
    def test_high_order_derivative(x):
        small_radius = ['sqrt', 'log', 'log2', 'log10', 'arccos', 'log1p',
                        'arcsin', 'arctan', 'arcsinh', 'tan', 'tanh',
                        'arctanh', 'arccosh']
        r = 0.0061
        n_max = 20
        y = x
        for name in function_names + ['arccosh', 'arctanh']:
            f, true_df = get_function(name, n=1)
            if name == 'arccosh':
                y = y + 1

            vals, info = derivative(f, y, r=r, n=n_max, full_output=True,
                                    step_ratio=1.6)
            for n in range(1, n_max):
                f, true_df = get_function(name, n=n)
                if true_df is None:
                    continue

                tval = true_df(y)

                aerr0 = info.error_estimate[n] + 1e-15
                aerr = min(aerr0, max(np.abs(tval)*1e-6, 1e-8))
                print(n, name, y, vals[n], tval, info.iterations, aerr0, aerr)
                note("{}, {}, {}, {}, {}, {}, {}, {}".format(
                    n, name, y, vals[n], tval, info.iterations,
                               aerr0, aerr))
                assert_allclose(np.real(vals[n]), tval, rtol=1e-6, atol=aerr)
Code Example #5
    def teardown(self):
        """On teardown we clean up after ourselves as usual, but we also
        do some additional testing: We generate a .t file based on our test
        run using run-test.py -i to get the correct output.

        We then test it in a number of other configurations, verifying that
        each passes the same test."""
        super(verifyingstatemachine, self).teardown()
        try:
            shutil.rmtree(self.repodir)
        except OSError:
            pass
        ttest = os.linesep.join("  " + l for l in self.log)
        os.chdir(testtmp)
        path = os.path.join(testtmp, "test-generated.t")
        with open(path, 'w') as o:
            o.write(ttest + os.linesep)
        with open(os.devnull, "w") as devnull:
            rewriter = subprocess.Popen(
                [runtests, "--local", "-i", path], stdin=subprocess.PIPE,
                stdout=devnull, stderr=devnull,
            )
            rewriter.communicate("yes")
            with open(path, 'r') as i:
                ttest = i.read()

        e = None
        if not self.failed:
            try:
                output = subprocess.check_output([
                    runtests, path, "--local", "--pure"
                ], stderr=subprocess.STDOUT)
                assert "Ran 1 test" in output, output
                for ext in (
                    self.all_extensions - self.non_skippable_extensions
                ):
                    tf = os.path.join(testtmp, "test-generated-no-%s.t" % (
                        ext,
                    ))
                    with open(tf, 'w') as o:
                        for l in ttest.splitlines():
                            if l.startswith("  $ hg"):
                                l = l.replace(
                                    "--config %s=" % (
                                        extensionconfigkey(ext),), "")
                            o.write(l + os.linesep)
                    with open(tf, 'r') as r:
                        t = r.read()
                        assert ext not in t, t
                    output = subprocess.check_output([
                        runtests, tf, "--local",
                    ], stderr=subprocess.STDOUT)
                    assert "Ran 1 test" in output, output
            except subprocess.CalledProcessError as e:
                note(e.output)
        if self.failed or e is not None:
            with open(savefile, "wb") as o:
                o.write(ttest)
        if e is not None:
            raise e
Code Example #6
File: test_location.py Project: openbermuda/karmapi
def test_find_biggest_gap(lons):

    assume(lons)

    start, end = locations.find_biggest_gap(lons)

    note("start, end: {}. {}".format(start, end))

    # so all the other lons lie between end and start
    xstart = start
    if start <= end:
        xstart += 360.0

    xstart = twoplaces(xstart)
    end = twoplaces(end)

    epsilon = .1
    for lon in lons:

        lon = twoplaces(lon)

        if lon < end:
            lon += Decimal('360.0')

        assert(lon <= xstart)
Code Example #7
 def on_evict(self, key, value, score):
     note("Evicted %r" % (key,))
     assert score == scores[key]
     del scores[key]
     if len(scores) > 1:
         assert score <= min(v for k, v in scores.items() if k != last_entry[0])
     evicted.add(key)
Code Example #8
File: adagrad_test.py Project: RichieMay/pytorch
    def test_sparse_adagrad(self, inputs, lr, epsilon,
                            data_strategy, gc, dc):
        param, momentum, grad = inputs
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)

        # Create an indexing array containing values that are lists of indices,
        # which index into grad
        indices = data_strategy.draw(
            hu.tensor(dtype=np.int64,
                      elements=st.sampled_from(np.arange(grad.shape[0]))),
        )
        hypothesis.note('indices.shape: %s' % str(indices.shape))

        # For now, the indices must be unique
        hypothesis.assume(np.array_equal(np.unique(indices.flatten()),
                                         np.sort(indices.flatten())))

        # Sparsify grad
        grad = grad[indices]

        op = core.CreateOperator(
            "SparseAdagrad",
            ["param", "momentum", "indices", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc)

        def ref_sparse(param, momentum, indices, grad, lr, ref_using_fp16=False):
            param_out = np.copy(param)
            momentum_out = np.copy(momentum)
            for i, index in enumerate(indices):
                param_out[index], momentum_out[index] = self.ref_adagrad(
                    param[index],
                    momentum[index],
                    grad[i],
                    lr,
                    epsilon,
                    using_fp16=ref_using_fp16
                )
            return (param_out, momentum_out)

        ref_using_fp16_values = [False]
        if dc == hu.gpu_do:
            ref_using_fp16_values.append(True)

        for ref_using_fp16 in ref_using_fp16_values:
            if ref_using_fp16:
                print('test_sparse_adagrad with half precision embedding')
                momentum_i = momentum.astype(np.float16)
                param_i = param.astype(np.float16)
            else:
                print('test_sparse_adagrad with full precision embedding')
                momentum_i = momentum.astype(np.float32)
                param_i = param.astype(np.float32)

            self.assertReferenceChecks(
                gc, op, [param_i, momentum_i, indices, grad, lr, ref_using_fp16],
                ref_sparse
            )
Code Example #9
File: adagrad_test.py Project: bittnt/caffe2
    def test_row_wise_sparse_adagrad_empty(self, inputs, lr, epsilon,
                                           data_strategy, gc, dc):
        param = inputs[0]
        lr = np.array([lr], dtype=np.float32)

        momentum = data_strategy.draw(
            hu.tensor1d(min_len=param.shape[0], max_len=param.shape[0],
                        elements=hu.elements_of_type(dtype=np.float32))
        )
        momentum = np.abs(momentum)

        grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
        indices = np.empty(shape=(0,), dtype=np.int64)

        hypothesis.note('indices.shape: %s' % str(indices.shape))

        op = core.CreateOperator(
            "RowWiseSparseAdagrad",
            ["param", "momentum", "indices", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc)

        def ref_row_wise_sparse(param, momentum, indices, grad, lr):
            param_out = np.copy(param)
            momentum_out = np.copy(momentum)
            return (param_out, momentum_out)

        self.assertReferenceChecks(
            gc, op,
            [param, momentum, indices, grad, lr],
            ref_row_wise_sparse)
Code Example #10
File: test_scaling.py Project: rot256/crypsis
def test_scaling_map(threads, vals):
    note('Mapping with %d threads' % threads)
    def funcer(t):
        m, p, n = t
        return pow(m, p, n)
    res = scaling.map(funcer, vals, threads=threads)
    assert set(res) == set(map(funcer, vals))
Code Example #11
File: testing_hypothesis.py Project: dwhoman/CVPI
def test_add_images(images, a, b, scale, bias):
    assert len(images) == 2
    assert images[0].shape == images[1].shape
    
    image_1 = twoDto3d(images[0])
    image_2 = twoDto3d(images[1])
                
    image_sum = np.clip(np.ceil(scale * (a * image_1 + b * image_2) + bias), 0, 255)

    compl_proc = subprocess.check_output([
        "./cvpi_tests_hyp",
        "cvpi_image_add",
        image_hex(image_1),
        image_hex(image_2),
        str(images[0].shape[0]),
        str(images[0].shape[1]),
        str(a), str(b), format(scale, 'f'), format(bias, 'f')])

    compl_proc_str = ''.join(map(chr, compl_proc))
    numpy_image_str = image_hex(image_sum) + "\n"

    h.note(str(images[0].shape[0]) + " " + str(images[0].shape[1]))
    h.note(image_hex(image_1))
    h.note(image_hex(image_2))
    h.note("cvpi: " + compl_proc_str)
    h.note("numpy: " + numpy_image_str)

    assert numpy_image_str == compl_proc_str
Code Example #12
    def swagger_fuzzer(data):
        request = get_request(data, SPEC, SPEC_HOST)
        note("Curl command: {}".format(to_curl_command(request)))

        result = s.send(request)

        for validator in VALIDATORS:
            validator(SPEC, request, result, settings)
Code Example #13
 def test_identity(self, b_indexes):
     """
     b''.join(_chunks(b, indexes)) == b
     """
     (b, indexes) = b_indexes
     cs = list(_chunks(b, indexes))
     note('chunks = {!r}'.format(cs))
     self.assertEqual(b''.join(_chunks(b, indexes)), b)
Code Example #14
def test_can_eval_stream_inside_find(stream, rnd):
    x = find(
        st.lists(st.integers(min_value=0), min_size=10),
        lambda t: any(t > s for (t, s) in zip(t, stream)),
        settings=settings(database=None, max_shrinks=2000, max_examples=2000),
    )
    note("x: %r" % (x,))
    note("Evalled: %r" % (stream,))
    assert len([1 for i, v in enumerate(x) if stream[i] < v]) == 1
Code Example #15
 def test_modified_exceptions(self, pre, post):
     """
     Prefixed and suffixed versions of the special-cased names are not excepted.
     """
     assume(pre or post)  # Require at least some prefix or suffix
     for special in self.specials:
         modname = pre + special + post
         note('modname={!r}'.format(modname))
         self.assertTrue(utils.is_candidate_name(modname))
Code Example #16
    def test_uri(self, url):
        # type: (URL) -> None
        """
        L{HTTPRequestWrappingIRequest.uri} matches the underlying legacy
        request URI.
        """
        try:
            uri = url.asURI()  # Normalize as (network-friendly) URI
        except UnicodeError:
            # This happens due to a bug in hyperlink:
            #   https://github.com/python-hyper/hyperlink/issues/19
            # For now, all we can do is tell hypothesis to skip the sample that
            # got us here.
            assume(False)

        path = (
            uri.replace(scheme=u"", host=u"", port=None)
            .asText()
            .encode("ascii")
        )
        legacyRequest = self.legacyRequest(
            isSecure=(uri.scheme == u"https"),
            host=uri.host.encode("ascii"), port=uri.port, path=path,
        )
        request = HTTPRequestWrappingIRequest(request=legacyRequest)

        # Work around for https://github.com/mahmoud/hyperlink/issues/5
        def normalize(uri):
            # type: (URL) -> URL
            return uri.replace(path=(s for s in uri.path if s))

        note("_request.uri: {!r}".format(path))
        note("request.uri: {!r}".format(request.uri))

        uriNormalized = normalize(uri)
        requestURINormalized = normalize(request.uri)

        # Needed because non-equal URLs can render as the same strings
        def strURL(url):
            # type: (URL) -> Text
            return (
                u"URL(scheme={url.scheme!r}, "
                u"userinfo={url.userinfo!r}, "
                u"host={url.host!r}, "
                u"port={url.port!r}, "
                u"path={url.path!r}, "
                u"query={url.query!r}, "
                u"fragment={url.fragment!r}, "
                u"rooted={url.rooted})"
            ).format(url=url)

        self.assertEqual(
            requestURINormalized, uriNormalized,
            "{} != {}".format(
                strURL(requestURINormalized), strURL(uriNormalized)
            )
        )
Code Example #17
File: test_wncc.py Project: aplavin/wncc
def test_no_mask(image, template):
    mask = np.ones_like(template)
    result = wncc(image, template, mask)
    result_nomask = wncc(image, template)

    h.note(result)
    h.note(result_nomask)

    np.testing.assert_array_equal(result, result_nomask)
Code Example #18
File: test_wncc.py Project: aplavin/wncc
def test_fixed_image(image, templatemask):
    template, mask = templatemask

    result = wncc(image, template, mask)
    result_with_fixed = wncc_prepare(image, template.shape)(template, mask)

    h.note(result)
    h.note(result_with_fixed)

    np.testing.assert_array_equal(result, result_with_fixed)
Code Example #19
File: test_fft.py Project: aplavin/wncc
def test_fft(arr_shape):
    arr, shape = arr_shape

    fftw = FFTW(shape, arr.dtype)
    expected = np.fft.rfft2(arr, shape)
    result = fftw(arr)

    h.note(expected)
    h.note(result)
    assert np.allclose(expected, result, equal_nan=True)
Code Example #20
File: test_wncc.py Project: aplavin/wncc
def test_random(shape_image, shape_template):
    image = np.random.rand(*shape_image)
    template = np.random.rand(*shape_template)
    mask = np.random.rand(*shape_template)
    naive_result = _wncc_naive(image, template, mask)
    result = wncc(image, template, mask)

    h.note(naive_result)
    h.note(result)
    assert np.allclose(naive_result, result, atol=1e-2, equal_nan=True)
Code Example #21
 def query_is_valid(self):
     assume(self.query_manager.fields)
     fieldlist = [
         fld.name
         for fld
         in self.query_manager.fields.values()
     ]
     note('field list: {}'.format(list(fieldlist)))
     note('sql display: {}'.format(self.query_manager.sql_display))
     assert valid_sql(stmt=self.query_manager.sql_display)
Code Example #22
def test_add_criteria(field, value):
    random_op = random_operator(field)
    qm = example_query_manager()
    qm.add_criteria(
        field=field
        , value=value
        , operator=random_op
    )
    sql = qm.sql_display
    note('sql: {}'.format(sql))
    assert valid_sql(sql)
Code Example #23
@contextlib.contextmanager
def acceptableerrors(*args):
    """Sometimes we know an operation we're about to perform might fail, and
    we're OK with some of the failures. In those cases this may be used as a
    context manager and will swallow expected failures, as identified by
    substrings of the error message Mercurial emits."""
    try:
        yield
    except subprocess.CalledProcessError as e:
        if not any(a in e.output for a in args):
            note(e.output)
            raise
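A usage sketch, assuming the decorated context-manager form above (the hg command and the acceptable error substring are hypothetical):

with acceptableerrors("abort: cannot rebase"):
    subprocess.check_output(
        ["hg", "rebase", "-d", "default"],
        stderr=subprocess.STDOUT,
    )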
Code Example #24
 def test_scalar_to_vector(val):
     def fun(x):
         return np.array([x, x**2, x**3])
     truth = np.array([[1., 2 * val, 3 * val**2]])
     for method in ['multicomplex', 'complex', 'central', 'forward',
                    'backward']:
         j0, info = nd.Jacobian(fun, method=method, full_output=True)(val)
         error = np.abs(j0-truth)
         note('method={}, error={}, error_est={}'.format(method, error,
                                                         info.error_estimate))
         assert_allclose(j0, truth, rtol=1e-3, atol=1e-6)
Code Example #25
File: quick_test.py Project: mraxilus/experiments
 def test_partition_many(self, parameters):
     xs, left, right = parameters
     pivot = xs[-1]
     note('pivot: {0}'.format(pivot))
     pivot_index = quick.partition(xs, 0, len(xs) - 1)
     left, right = xs[:pivot_index], xs[pivot_index:]
     note('partitions: {0}, {1}'.format(left, right))
     if len(left) > 0:
         assert max(left) <= pivot
     if len(right) > 0:
         assert min(right) >= pivot
Code Example #26
File: momentum_sgd_test.py Project: bittnt/caffe2
    def test_sparse_momentum_sgd(
        self, inputs, momentum, nesterov, lr, data_strategy, gc, dc
    ):
        w, grad, m = inputs

        # Create an indexing array containing values which index into grad
        indices = data_strategy.draw(
            hu.tensor(
                dtype=np.int64,
                elements=st.sampled_from(np.arange(grad.shape[0]))
            ),
        )
        hypothesis.note('indices.shape: %s' % str(indices.shape))

        # For now, the indices must be unique
        hypothesis.assume(
            np.array_equal(
                np.unique(indices.flatten()), np.sort(indices.flatten())
            )
        )

        # Sparsify grad
        grad = grad[indices]
        # Make momentum >= 0
        m = np.abs(m)
        # Convert lr to a numpy array
        lr = np.asarray([lr], dtype=np.float32)

        op = core.CreateOperator(
            "SparseMomentumSGDUpdate", ["grad", "m", "lr", "param", "indices"],
            ["adjusted_grad", "m", "param"],
            momentum=momentum,
            nesterov=int(nesterov),
            device_option=gc
        )

        # Reference
        def momentum_sgd(grad, m, lr):
            lr = lr[0]
            if not nesterov:
                adjusted_gradient = lr * grad + momentum * m
                return (adjusted_gradient, adjusted_gradient)
            else:
                m_new = momentum * m + lr * grad
                return ((1 + momentum) * m_new - momentum * m, m_new)

        def sparse(grad, m, lr, param, i):
            grad_new, m_new = momentum_sgd(grad, m[i], lr)
            m[i] = m_new
            param[i] -= grad_new
            return (grad_new, m, param)

        self.assertReferenceChecks(gc, op, [grad, m, lr, w, indices], sparse)
Code Example #27
File: test_convolve.py Project: aplavin/wncc
def test_correlate(arr_arr):
    arr1, arr2 = arr_arr

    c = Convolver(arr1.shape, arr2.shape, arr1.dtype)
    c.add_array('A', arr1)
    c.add_array('B', arr2)

    expected = sp.signal.fftconvolve(arr1, arr2[::-1, ::-1])
    result = c.correlate('A', 'B')
    h.note(expected)
    h.note(result)
    assert np.allclose(expected, result, atol=1e-3, equal_nan=True)
Code Example #28
File: test_fft.py Project: aplavin/wncc
def test_inverse_fft(arr_shape):
    arr, shape = arr_shape

    fft = np.fft.rfft2(arr, shape)

    fftw = FFTW(shape, arr.dtype)
    ifftw = fftw.get_inverse()
    expected = np.fft.irfft2(fft, shape)
    result = ifftw(fft)

    h.note(expected)
    h.note(result)
    assert np.allclose(expected, result, equal_nan=True)
Code Example #29
File: test_wncc.py Project: aplavin/wncc
def test_gen_random(image, templatemask):
    template, mask = templatemask

    naive_result = _wncc_naive(image, template, mask)
    result = wncc(image, template, mask)

    h.note(naive_result)
    h.note(result)

    naive_finite = naive_result[np.isfinite(naive_result) & np.isfinite(result)]
    res_finite = result[np.isfinite(naive_result) & np.isfinite(result)]

    assert np.allclose(naive_finite, res_finite, atol=1e-2)
Code Example #30
File: adagrad_test.py Project: bittnt/caffe2
    def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon,
                                     data_strategy, gc, dc):
        param, grad = inputs
        lr = np.array([lr], dtype=np.float32)

        # Create a 1D row-wise average sum of squared gradients tensor.
        momentum = data_strategy.draw(
            hu.tensor1d(min_len=param.shape[0], max_len=param.shape[0],
                        elements=hu.elements_of_type(dtype=np.float32))
        )
        momentum = np.abs(momentum)

        # Create an indexing array containing values which index into grad
        indices = data_strategy.draw(
            hu.tensor(dtype=np.int64,
                      elements=st.sampled_from(np.arange(grad.shape[0]))),
        )

        # Note that unlike SparseAdagrad, RowWiseSparseAdagrad uses a moment
        # tensor that is strictly 1-dimensional and equal in length to the
        # first dimension of the parameters, so indices must also be
        # 1-dimensional.
        indices = indices.flatten()

        hypothesis.note('indices.shape: %s' % str(indices.shape))

        # The indices must be unique
        hypothesis.assume(np.array_equal(np.unique(indices), np.sort(indices)))

        # Sparsify grad
        grad = grad[indices]

        op = core.CreateOperator(
            "RowWiseSparseAdagrad",
            ["param", "momentum", "indices", "grad", "lr"],
            ["param", "momentum"],
            epsilon=epsilon,
            device_option=gc)

        def ref_row_wise_sparse(param, momentum, indices, grad, lr):
            param_out = np.copy(param)
            momentum_out = np.copy(momentum)
            for i, index in enumerate(indices):
                param_out[index], momentum_out[index] = self.ref_row_wise_adagrad(
                    param[index], momentum[index], grad[i], lr, epsilon)
            return (param_out, momentum_out)

        self.assertReferenceChecks(
            gc, op,
            [param, momentum, indices, grad, lr],
            ref_row_wise_sparse)
Code Example #31
def test_example_message_round_trippable(data):
    """Ensure generated classes can be round-tripped into avro and back."""
    # setup
    parsed_schema = parse_into_types(schema=EXAMPLE_AVRO_MODEL_SCHEMA)
    namespaces = populate_namespaces([parsed_schema])

    # the actual intrinsic thing we care to test
    module_contents, = (
        contents
        for path, contents in rendering.render_modules(namespaces).items()
        if path.stem != '__init__')

    # write module contents out to file, then, load file and generate an example object
    tmp_path = Path('/tmp/example.py')
    with tmp_path.open('w') as f:
        f.write(module_contents)
    spec = imp.spec_from_file_location('example', tmp_path)
    example = imp.module_from_spec(spec)
    sys.modules['example'] = example
    spec.loader.exec_module(example)
    original_example_avro_model = data.draw(
        st.from_type(example.ExampleAvroModel))
    tmp_path.unlink()

    # clean up PBT-generated example model
    def check_decimal(d: Optional[Decimal]):
        if d is None:
            return
        assume(d.is_finite())
        assume(
            len(d.as_tuple().digits) < 21
        )  # if there are more digits than precision, round-tripping will truncate
        assume(
            d.as_tuple().exponent == -2
        )  # bug in avro python implementation; exponent _must_ match scale

    check_decimal(original_example_avro_model.decimal)
    check_decimal(original_example_avro_model.maybeDecimal)
    assume(
        original_example_avro_model.maybeInt is None or
        -2_147_483_648 <= original_example_avro_model.maybeInt <= 2_147_483_647
    )  # 32-bit signed range is underspecified by python "int" type (which also supports longs)
    assume(
        -9_223_372_036_854_775_808 <=
        original_example_avro_model.sampleInner.foo <=
        9_223_372_036_854_775_807)  # 64-bit signed range also underspecified
    if isinstance(original_example_avro_model.sampleUnion,
                  example.ExampleAvroModel.RecordWithInt):
        assume(-2_147_483_648 <= original_example_avro_model.sampleUnion.value
               <= 2_147_483_647)
    original_example_avro_model = original_example_avro_model._replace(
        timestamp=datetime(2000,
                           1,
                           1,
                           0,
                           0,
                           0,
                           0,
                           tzinfo=avro.timezones.utc)
    )  # set this manually, since it's underspecified by the `datetime.datetime` type annotation

    # round trip through avro ser/deser
    avro_parsed_schema = avro.schema.parse(
        json.dumps(EXAMPLE_AVRO_MODEL_SCHEMA))
    example_model_dict = to_avro_dict(original_example_avro_model)
    note(example_model_dict)
    buffer = io.BytesIO()
    encoder = avro.io.BinaryEncoder(buffer)
    datum_writer = avro.io.DatumWriter(avro_parsed_schema)
    datum_writer.write(example_model_dict, encoder)
    buffer.seek(0)
    decoder = avro.io.BinaryDecoder(buffer)
    datum_reader = avro.io.DatumReader(writers_schema=avro_parsed_schema,
                                       readers_schema=avro_parsed_schema)
    round_tripped_example_avro_model_dict = datum_reader.read(decoder)
    note(round_tripped_example_avro_model_dict)
    round_tripped_example_avro_model = from_avro_dict(
        round_tripped_example_avro_model_dict,
        record_type=example.ExampleAvroModel)

    assert round_tripped_example_avro_model == original_example_avro_model
Code Example #32
    def testBijector(self, bijector_name, data):
        tfp_hps.guitar_skip_if_matches('Tanh', bijector_name, 'b/144163991')
        event_dim = data.draw(hps.integers(min_value=2, max_value=6))
        bijector = data.draw(
            bijectors(bijector_name=bijector_name,
                      event_dim=event_dim,
                      enable_vars=True))
        self.evaluate(tf.group(*[v.initializer for v in bijector.variables]))

        # Forward mapping: Check differentiation through forward mapping with
        # respect to the input and parameter variables.  Also check that any
        # variables are not referenced overmuch.
        # TODO(axch): Would be nice to get rid of all this shape inference logic and
        # just rely on a notion of batch and event shape for bijectors, so we can
        # pass those through `domain_tensors` and `codomain_tensors` and use
        # `tensors_in_support`.  However, `RationalQuadraticSpline` behaves weirdly
        # somehow and I got confused.
        codomain_event_shape = [event_dim] * bijector.inverse_min_event_ndims
        codomain_event_shape = constrain_inverse_shape(bijector,
                                                       codomain_event_shape)
        shp = bijector.inverse_event_shape(codomain_event_shape)
        shp = tensorshape_util.concatenate(
            data.draw(
                tfp_hps.broadcast_compatible_shape(
                    shp[:shp.ndims - bijector.forward_min_event_ndims])),
            shp[shp.ndims - bijector.forward_min_event_ndims:])
        xs = tf.identity(data.draw(domain_tensors(bijector, shape=shp)),
                         name='xs')
        wrt_vars = [xs] + [
            v for v in bijector.trainable_variables if v.dtype.is_floating
        ]
        with tf.GradientTape() as tape:
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `forward` of {}'.format(bijector)):
                tape.watch(wrt_vars)
                # TODO(b/73073515): Fix graph mode gradients with bijector caching.
                ys = bijector.forward(xs + 0)
        grads = tape.gradient(ys, wrt_vars)
        assert_no_none_grad(bijector, 'forward', wrt_vars, grads)

        # For scalar bijectors, verify correctness of the _is_increasing method.
        if (bijector.forward_min_event_ndims == 0
                and bijector.inverse_min_event_ndims == 0):
            dydx = grads[0]
            hp.note('dydx: {}'.format(dydx))
            isfinite = tf.math.is_finite(dydx)
            incr_or_slope_eq0 = bijector._internal_is_increasing() | tf.equal(
                dydx, 0)  # pylint: disable=protected-access
            self.assertAllEqual(
                isfinite & incr_or_slope_eq0,
                isfinite & (dydx >= 0) | tf.zeros_like(incr_or_slope_eq0))

        # FLDJ: Check differentiation through forward log det jacobian with
        # respect to the input and parameter variables.  Also check that any
        # variables are not referenced overmuch.
        event_ndims = data.draw(
            hps.integers(min_value=bijector.forward_min_event_ndims,
                         max_value=xs.shape.ndims))
        with tf.GradientTape() as tape:
            max_permitted = _ldj_tensor_conversions_allowed(bijector,
                                                            is_forward=True)
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `forward_log_det_jacobian` of {}'.format(bijector),
                    max_permissible=max_permitted):
                tape.watch(wrt_vars)
                # TODO(b/73073515): Fix graph mode gradients with bijector caching.
                ldj = bijector.forward_log_det_jacobian(
                    xs + 0, event_ndims=event_ndims)
        grads = tape.gradient(ldj, wrt_vars)
        assert_no_none_grad(bijector, 'forward_log_det_jacobian', wrt_vars,
                            grads)

        # Inverse mapping: Check differentiation through inverse mapping with
        # respect to the codomain "input" and parameter variables.  Also check that
        # any variables are not referenced overmuch.
        domain_event_shape = [event_dim] * bijector.forward_min_event_ndims
        domain_event_shape = constrain_forward_shape(bijector,
                                                     domain_event_shape)
        shp = bijector.forward_event_shape(domain_event_shape)
        shp = tensorshape_util.concatenate(
            data.draw(
                tfp_hps.broadcast_compatible_shape(
                    shp[:shp.ndims - bijector.inverse_min_event_ndims])),
            shp[shp.ndims - bijector.inverse_min_event_ndims:])
        ys = tf.identity(data.draw(codomain_tensors(bijector, shape=shp)),
                         name='ys')
        wrt_vars = [ys] + [
            v for v in bijector.trainable_variables if v.dtype.is_floating
        ]
        with tf.GradientTape() as tape:
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `inverse` of {}'.format(bijector)):
                tape.watch(wrt_vars)
                # TODO(b/73073515): Fix graph mode gradients with bijector caching.
                xs = bijector.inverse(ys + 0)
        grads = tape.gradient(xs, wrt_vars)
        assert_no_none_grad(bijector, 'inverse', wrt_vars, grads)

        # ILDJ: Check differentiation through inverse log det jacobian with respect
        # to the codomain "input" and parameter variables.  Also check that any
        # variables are not referenced overmuch.
        event_ndims = data.draw(
            hps.integers(min_value=bijector.inverse_min_event_ndims,
                         max_value=ys.shape.ndims))
        with tf.GradientTape() as tape:
            max_permitted = _ldj_tensor_conversions_allowed(bijector,
                                                            is_forward=False)
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `inverse_log_det_jacobian` of {}'.format(bijector),
                    max_permissible=max_permitted):
                tape.watch(wrt_vars)
                # TODO(b/73073515): Fix graph mode gradients with bijector caching.
                ldj = bijector.inverse_log_det_jacobian(
                    ys + 0, event_ndims=event_ndims)
        grads = tape.gradient(ldj, wrt_vars)
        assert_no_none_grad(bijector, 'inverse_log_det_jacobian', wrt_vars,
                            grads)
Code Example #33
 def test_shuffle_is_noop(self, xss, rnd):
     yss = list(xss)
     rnd.shuffle(yss)
     note("Shuffle: {0}".format(yss))
     self.assertEqual(xss, yss)
Code Example #34
 def test(xs):
     note('Hi there')
     if sum(xs) <= 100:
         raise ValueError()
Code Example #35
 def test(i):
     note('Hi there')
Code Example #36
 def _accept_all(self, *commands: Command) -> None:
     for command in commands:
         note(str(command))
     self.goaltree.accept_all(*commands)
Code Example #37
def independents(draw,
                 batch_shape=None,
                 event_dim=None,
                 enable_vars=False,
                 depth=None):
    """Strategy for drawing `Independent` distributions.

  The underlying distribution is drawn from the `distributions` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `Independent` distribution.  Note that the underlying distribution will in
      general have a higher-rank batch shape, to make room for reinterpreting
      some of those dimensions as the `Independent`'s event.  Hypothesis will
      pick one if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound Distributions.

  Returns:
    dists: A strategy for drawing `Independent` distributions with the specified
      `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())

    reinterpreted_batch_ndims = draw(hps.integers(min_value=0, max_value=2))

    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes(min_ndims=reinterpreted_batch_ndims))
    else:  # This independent adds some batch dims to its underlying distribution.
        batch_shape = tensorshape_util.concatenate(
            batch_shape,
            draw(
                tfp_hps.shapes(min_ndims=reinterpreted_batch_ndims,
                               max_ndims=reinterpreted_batch_ndims)))

    underlying = draw(
        distributions(batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars,
                      depth=depth - 1))
    hp.note('Forming Independent with underlying dist {}; '
            'parameters {}; reinterpreted_batch_ndims {}'.format(
                underlying, params_used(underlying),
                reinterpreted_batch_ndims))
    result_dist = tfd.Independent(
        underlying,
        reinterpreted_batch_ndims=reinterpreted_batch_ndims,
        validate_args=True)
    expected_shape = batch_shape[:len(batch_shape) - reinterpreted_batch_ndims]
    if expected_shape != result_dist.batch_shape:
        msg = ('Independent strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_dist,
                                                      expected_shape)
        raise AssertionError(msg)
    return result_dist
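A sketch of how this strategy is typically consumed, mirroring the tests elsewhere on this page (the test body is an assumption):

@hp.given(hps.data())
def test_independent_strategy_draws(data):
    dist = data.draw(independents(enable_vars=False))
    hp.note('Drew {}'.format(dist))
    # The strategy itself raises AssertionError on a bad batch shape,
    # so a successful draw is most of the check.
    assert isinstance(dist, tfd.Independent)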
Code Example #38
    def check_message(orig, orig_typedef, new, new_typedef):
        for field_number in set(orig.keys()) | set(new.keys()):
            # verify all fields are there
            assert field_number in orig
            assert field_number in orig_typedef
            assert field_number in new
            assert field_number in new_typedef

            orig_values = orig[field_number]
            new_values = new[field_number]
            orig_type = orig_typedef[field_number]["type"]
            new_type = new_typedef[field_number]["type"]

            note("Parsing field# %s" % field_number)
            note("orig_values: %r" % orig_values)
            note("new_values: %r" % new_values)
            note("orig_type: %s" % orig_type)
            note("new_type: %s" % new_type)
            # Fields might be lists. Just convert everything to a list
            if not isinstance(orig_values, list):
                orig_values = [orig_values]
                assert not isinstance(new_values, list)
                new_values = [new_values]

            # if the types don't match, then try to convert them
            if new_type == "message" and orig_type in ["bytes", "string"]:
                # if the type is a message, we want to convert the orig type to a message
                # this isn't ideal, since we'll be using the unintended type,
                # but it's the best way to compare. Re-encoding a message to
                # binary might not keep the field order
                new_field_typedef = new_typedef[field_number][
                    "message_typedef"]
                for i, orig_value in enumerate(orig_values):
                    if orig_type == "bytes":
                        (
                            orig_values[i],
                            orig_field_typedef,
                            _,
                        ) = length_delim.decode_lendelim_message(
                            length_delim.encode_bytes(orig_value),
                            config,
                            new_field_typedef,
                        )
                    else:
                        # string value
                        (
                            orig_values[i],
                            orig_field_typedef,
                            _,
                        ) = length_delim.decode_lendelim_message(
                            length_delim.encode_string(orig_value),
                            config,
                            new_field_typedef,
                        )
                    orig_typedef[field_number][
                        "message_typedef"] = orig_field_typedef
                orig_type = "message"

            if new_type == "string" and orig_type == "bytes":
                # our bytes were accidentally a valid string
                new_type = "bytes"
                for i, new_value in enumerate(new_values):
                    new_values[i], _ = length_delim.decode_bytes(
                        length_delim.encode_string(new_value), 0)
            # sort the lists with special handling for dicts
            orig_values.sort(
                key=lambda x: x if not isinstance(x, dict) else x.items())
            new_values.sort(
                key=lambda x: x if not isinstance(x, dict) else x.items())
            for orig_value, new_value in zip(orig_values, new_values):
                if orig_type == "message":
                    check_message(
                        orig_value,
                        orig_typedef[field_number]["message_typedef"],
                        new_value,
                        new_typedef[field_number]["message_typedef"],
                    )
                else:
                    assert orig_value == new_value
Code Example #39
 def update_short(self, buf):
     note("update_short: %s" % len(buf))
     self._update(buf)
Code Example #40
File: models.py Project: vkutepov/schemathesis
def cant_serialize(media_type: str) -> NoReturn:  # type: ignore
    """Reject the current example if we don't know how to send this data to the application."""
    event_text = f"Can't serialize data to `{media_type}`."
    note(f"{event_text} {SERIALIZERS_SUGGESTION_MESSAGE}")
    event(event_text)
    reject()  # type: ignore
Code Example #41
def batch_reshapes(draw,
                   batch_shape=None,
                   event_dim=None,
                   enable_vars=False,
                   depth=None,
                   eligibility_filter=lambda name: True,
                   validate_args=True):
    """Strategy for drawing `BatchReshape` distributions.

  The underlying distribution is drawn from the `distributions` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `BatchReshape` distribution.  Note that the underlying distribution will
      in general have a different batch shape, to make the reshaping
      non-trivial.  Hypothesis will pick one if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
      `tfp.util.TransformedVariable`}.
    depth: Python `int` giving maximum nesting depth of compound Distributions.
    eligibility_filter: Optional Python callable.  Blacklists some Distribution
      class names so they will not be drawn.
    validate_args: Python `bool`; whether to enable runtime assertions.

  Returns:
    dists: A strategy for drawing `BatchReshape` distributions with the
      specified `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())

    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes(min_ndims=1, max_side=4))

    # TODO(b/142135119): Wanted to draw general input and output shapes like the
    # following, but Hypothesis complained about filtering out too many things.
    # underlying_batch_shape = draw(tfp_hps.shapes(min_ndims=1))
    # hp.assume(
    #   batch_shape.num_elements() == underlying_batch_shape.num_elements())
    underlying_batch_shape = [tf.TensorShape(batch_shape).num_elements()]

    underlying = draw(
        distributions(batch_shape=underlying_batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars,
                      depth=depth - 1,
                      eligibility_filter=eligibility_filter,
                      validate_args=validate_args))
    hp.note('Forming BatchReshape with underlying dist {}; '
            'parameters {}; batch_shape {}'.format(underlying,
                                                   params_used(underlying),
                                                   batch_shape))
    result_dist = tfd.BatchReshape(underlying,
                                   batch_shape=batch_shape,
                                   validate_args=True)
    return result_dist
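A hedged consumption sketch for this strategy (an assumed test body, following the same pattern as the surrounding tests):

@hp.given(hps.data())
def test_batch_reshape_strategy_shape(data):
    shape = data.draw(tfp_hps.shapes(min_ndims=1, max_side=4))
    dist = data.draw(batch_reshapes(batch_shape=shape))
    hp.note('Drew {} with batch_shape {}'.format(dist, dist.batch_shape))
    # BatchReshape should expose exactly the requested batch shape.
    assert dist.batch_shape == shape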
Code Example #42
    def _test_slicing(self, data, dist):
        strm = test_util.test_seed_stream()
        batch_shape = dist.batch_shape
        slices = data.draw(dhps.valid_slices(batch_shape))
        slice_str = 'dist[{}]'.format(', '.join(dhps.stringify_slices(slices)))
        # Make sure the slice string appears in Hypothesis' attempted example log
        hp.note('Using slice ' + slice_str)
        if not slices:  # Nothing further to check.
            return
        sliced_zeros = np.zeros(batch_shape)[slices]
        sliced_dist = dist[slices]
        hp.note('Using sliced distribution {}.'.format(sliced_dist))

        # Check that slicing modifies batch shape as expected.
        self.assertAllEqual(sliced_zeros.shape, sliced_dist.batch_shape)

        if not sliced_zeros.size:
            # TODO(b/128924708): Fix distributions that fail on degenerate empty
            #     shapes, e.g. Multinomial, DirichletMultinomial, ...
            return

        # Check that sampling of sliced distributions executes.
        with tfp_hps.no_tf_rank_errors():
            samples = self.evaluate(dist.sample(seed=strm()))
            sliced_dist_samples = self.evaluate(
                sliced_dist.sample(seed=strm()))

        # Come up with the slices for samples (which must also include event dims).
        sample_slices = (tuple(slices) if isinstance(
            slices, collections.Sequence) else (slices, ))
        if Ellipsis not in sample_slices:
            sample_slices += (Ellipsis, )
        sample_slices += tuple([slice(None)] *
                               tensorshape_util.rank(dist.event_shape))

        sliced_samples = samples[sample_slices]

        # Report sub-sliced samples (on which we compare log_prob) to hypothesis.
        hp.note('Sample(s) for testing log_prob ' + str(sliced_samples))

        # Check that sampling a sliced distribution produces the same shape as
        # slicing the samples from the original.
        self.assertAllEqual(sliced_samples.shape, sliced_dist_samples.shape)

        # Check that a sliced distribution can compute the log_prob of its own
        # samples (up to numerical validation errors).
        with tfp_hps.no_tf_rank_errors():
            try:
                lp = self.evaluate(dist.log_prob(samples))
            except tf.errors.InvalidArgumentError:
                # TODO(b/129271256): d.log_prob(d.sample()) should not fail
                #     validate_args checks.
                # We only tolerate this case for the non-sliced dist.
                return
            sliced_lp = self.evaluate(sliced_dist.log_prob(sliced_samples))

        # Check that the sliced dist's log_prob agrees with slicing the original's
        # log_prob.

        # This `hp.assume` is suppressing array sizes that cause the sliced and
        # non-sliced distribution to follow different Eigen code paths.  Those
        # different code paths lead to arbitrarily large variations in the results
        # at parameter settings that Hypothesis is all too good at finding.  Since
        # the purpose of this test is just to check that we got slicing right, those
        # discrepancies are a distraction.
        # TODO(b/140229057): Remove this `hp.assume`, if and when Eigen's numerics
        # become index-independent.
        all_packetized = (_all_packetized(dist)
                          and _all_packetized(sliced_dist)
                          and _all_packetized(samples)
                          and _all_packetized(sliced_samples))
        hp.note('Packetization check {}'.format(all_packetized))
        all_non_packetized = (_all_non_packetized(dist)
                              and _all_non_packetized(sliced_dist)
                              and _all_non_packetized(samples)
                              and _all_non_packetized(sliced_samples))
        hp.note('Non-packetization check {}'.format(all_non_packetized))
        hp.assume(all_packetized or all_non_packetized)

        self.assertAllClose(lp[slices], sliced_lp, atol=1e-5, rtol=1e-5)
Code Example #43
def _all_ok(thing, one_ok):
    hp.note('Testing packetization of {}.'.format(thing))
    for s in _all_shapes(thing):
        if not one_ok(s):
            return False
    return True
Code Example #44
    def testDistribution(self, dist_name, data):
        seed = test_util.test_seed()
        # Explicitly draw event_dim here to avoid relying on _params_event_ndims
        # later, so this test can support distributions that do not implement the
        # slicing protocol.
        event_dim = data.draw(hps.integers(min_value=2, max_value=6))
        dist = data.draw(
            dhps.distributions(dist_name=dist_name,
                               event_dim=event_dim,
                               enable_vars=True))
        batch_shape = dist.batch_shape
        batch_shape2 = data.draw(
            tfp_hps.broadcast_compatible_shape(batch_shape))
        dist2 = data.draw(
            dhps.distributions(dist_name=dist_name,
                               batch_shape=batch_shape2,
                               event_dim=event_dim,
                               enable_vars=True))
        self.evaluate([var.initializer for var in dist.variables])

        # Check that the distribution passes Variables through to the accessor
        # properties (without converting them to Tensor or anything like that).
        for k, v in six.iteritems(dist.parameters):
            if not tensor_util.is_ref(v):
                continue
            self.assertIs(getattr(dist, k), v)

        # Check that standard statistics do not read distribution parameters more
        # than twice (once in the stat itself and up to once in any validation
        # assertions).
        max_permissible = 2 + extra_tensor_conversions_allowed(dist)
        for stat in sorted(
                data.draw(
                    hps.sets(hps.one_of(
                        map(hps.just, [
                            'covariance', 'entropy', 'mean', 'mode', 'stddev',
                            'variance'
                        ])),
                             min_size=3,
                             max_size=3))):
            hp.note('Testing excessive var usage in {}.{}'.format(
                dist_name, stat))
            try:
                with tfp_hps.assert_no_excessive_var_usage(
                        'statistic `{}` of `{}`'.format(stat, dist),
                        max_permissible=max_permissible):
                    getattr(dist, stat)()

            except NotImplementedError:
                pass

        # Check that `sample` doesn't read distribution parameters more than twice,
        # and that it produces non-None gradients (if the distribution is fully
        # reparameterized).
        with tf.GradientTape() as tape:
            # TDs do bijector assertions twice (once by distribution.sample, and once
            # by bijector.forward).
            max_permissible = 2 + extra_tensor_conversions_allowed(dist)
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `sample` of `{}`'.format(dist),
                    max_permissible=max_permissible):
                sample = dist.sample(seed=seed)
        if dist.reparameterization_type == tfd.FULLY_REPARAMETERIZED:
            grads = tape.gradient(sample, dist.variables)
            for grad, var in zip(grads, dist.variables):
                var_name = var.name.rstrip('_0123456789:')
                if var_name in NO_SAMPLE_PARAM_GRADS.get(dist_name, ()):
                    continue
                if grad is None:
                    raise AssertionError(
                        'Missing sample -> {} grad for distribution {}'.format(
                            var_name, dist_name))

        # Turn off validations, since TODO(b/129271256) log_prob can choke on dist's
        # own samples.  Also, to relax conversion counts for KL (might do >2 w/
        # validate_args).
        dist = dist.copy(validate_args=False)
        dist2 = dist2.copy(validate_args=False)

        # Test that KL divergence reads distribution parameters at most once, and
        # that it produces non-None gradients.
        try:
            for d1, d2 in (dist, dist2), (dist2, dist):
                with tf.GradientTape() as tape:
                    with tfp_hps.assert_no_excessive_var_usage(
                            '`kl_divergence` of (`{}` (vars {}), `{}` (vars {}))'
                            .format(d1, d1.variables, d2, d2.variables),
                            max_permissible=1
                    ):  # No validation => 1 convert per var.
                        kl = d1.kl_divergence(d2)
                wrt_vars = list(d1.variables) + list(d2.variables)
                grads = tape.gradient(kl, wrt_vars)
                for grad, var in zip(grads, wrt_vars):
                    if grad is None and dist_name not in NO_KL_PARAM_GRADS:
                        raise AssertionError(
                            'Missing KL({} || {}) -> {} grad:\n'  # pylint: disable=duplicate-string-formatting-argument
                            '{} vars: {}\n{} vars: {}'.format(
                                d1, d2, var, d1, d1.variables, d2,
                                d2.variables))
        except NotImplementedError:
            pass

        # Test that log_prob produces non-None gradients, except for distributions
        # on the NO_LOG_PROB_PARAM_GRADS blacklist.
        if dist_name not in NO_LOG_PROB_PARAM_GRADS:
            with tf.GradientTape() as tape:
                lp = dist.log_prob(tf.stop_gradient(sample))
            grads = tape.gradient(lp, dist.variables)
            for grad, var in zip(grads, dist.variables):
                if grad is None:
                    raise AssertionError(
                        'Missing log_prob -> {} grad for distribution {}'.
                        format(var, dist_name))

        # Test that all forms of probability evaluation avoid reading distribution
        # parameters more than once.
        for evaluative in sorted(
                data.draw(
                    hps.sets(hps.one_of(
                        map(hps.just, [
                            'log_prob', 'prob', 'log_cdf', 'cdf',
                            'log_survival_function', 'survival_function'
                        ])),
                             min_size=3,
                             max_size=3))):
            hp.note('Testing excessive var usage in {}.{}'.format(
                dist_name, evaluative))
            try:
                # No validation => 1 convert. But for TD we allow 2:
                # dist.log_prob(bijector.inverse(samp)) + bijector.ildj(samp)
                max_permissible = 2 + extra_tensor_conversions_allowed(dist)
                with tfp_hps.assert_no_excessive_var_usage(
                        'evaluative `{}` of `{}`'.format(evaluative, dist),
                        max_permissible=max_permissible):
                    getattr(dist, evaluative)(sample)
            except NotImplementedError:
                pass
Code Example #45
 def _accept(self, command: Command) -> None:
     note(str(command))
     self.goaltree.accept(command)
Code Example #46
def quantized_distributions(draw,
                            batch_shape=None,
                            event_dim=None,
                            enable_vars=False,
                            eligibility_filter=lambda name: True,
                            validate_args=True):
    """Strategy for drawing `QuantizedDistribution`s.

  The underlying distribution is drawn from the `base_distributions` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `QuantizedDistribution`. Hypothesis will pick a `batch_shape` if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    eligibility_filter: Optional Python callable.  Blacklists some Distribution
      class names so they will not be drawn.
    validate_args: Python `bool`; whether to enable runtime assertions.

  Returns:
    dists: A strategy for drawing `QuantizedDistribution`s with the specified
      `batch_shape` (or an arbitrary one if omitted).
  """

    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())

    low_quantile = draw(
        hps.one_of(hps.just(None), hps.floats(min_value=0.01, max_value=0.7)))
    high_quantile = draw(
        hps.one_of(hps.just(None), hps.floats(min_value=0.3, max_value=.99)))

    def ok(name):
        return eligibility_filter(name) and name in QUANTIZED_BASE_DISTS

    underlyings = base_distributions(
        batch_shape=batch_shape,
        event_dim=event_dim,
        enable_vars=enable_vars,
        eligibility_filter=ok,
    )
    underlying = draw(underlyings)

    if high_quantile is not None:
        high_quantile = tf.convert_to_tensor(high_quantile,
                                             dtype=underlying.dtype)
    if low_quantile is not None:
        low_quantile = tf.convert_to_tensor(low_quantile,
                                            dtype=underlying.dtype)
        if high_quantile is not None:
            high_quantile = ensure_high_gt_low(low_quantile, high_quantile)

    hp.note('Drawing QuantizedDistribution with underlying distribution'
            ' {}'.format(underlying))

    try:
        low = None if low_quantile is None else underlying.quantile(
            low_quantile)
        high = None if high_quantile is None else underlying.quantile(
            high_quantile)
    except NotImplementedError:
        # The following code makes ReproducibilityTest flaky in graph mode (but not
        # eager). Failures are due either to partial mismatch in the samples in
        # ReproducibilityTest or to `low` and/or `high` being NaN. For now, to avoid
        # this, we set `low` and `high` to `None` for distributions not implementing
        # `quantile`.

        # seed = test_util.test_seed(hardcoded_seed=123)
        # low = (None if low_quantile is None
        #        else underlying.sample(low_quantile.shape, seed=seed))
        # high = (None if high_quantile is None else
        #         underlying.sample(high_quantile.shape, seed=seed))
        low = None
        high = None

    # Ensure that `low` and `high` are ints contained in distribution support
    # and span at least a few bins.
    if high is not None:
        high = tf.clip_by_value(high, -2**23, 2**23)
        high = tf.math.ceil(high + 5.)

    if low is not None:
        low = tf.clip_by_value(low, -2**23, 2**23)
        low = tf.math.ceil(low)

    result_dist = tfd.QuantizedDistribution(distribution=underlying,
                                            low=low,
                                            high=high,
                                            validate_args=validate_args)

    return result_dist
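
A hedged usage sketch for the strategy above, assuming (as its `draw` argument suggests) that it is decorated with `@hps.composite` and that the test runs in TF2 eager mode; the test name is illustrative:

from hypothesis import given
import hypothesis.strategies as hps

@given(hps.data())
def test_quantized_sample_shape(data):  # illustrative name
    dist = data.draw(quantized_distributions(validate_args=True))
    sample = dist.sample(seed=17)
    # QuantizedDistribution has scalar events, so one sample's shape
    # should equal the distribution's batch shape.
    assert sample.shape == dist.batch_shape
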
コード例 #47
def kernel_input(
    draw,
    batch_shape,
    example_dim=None,
    example_ndims=None,
    feature_dim=None,
    feature_ndims=None,
    enable_vars=False,
    name=None):
  """Strategy for drawing arbitrary Kernel input.

  In order to avoid duplicates (or even numerically near-duplicates), we
  generate inputs on a grid. We let hypothesis generate the number of grid
  points and distance between grid points, within some reasonable pre-defined
  ranges. The result will be a batch of example sets, within which each set of
  examples has no duplicates (but no such duplication avoidance is applied
  across batches).

  Args:
    draw: Hypothesis function supplied by `@hps.composite`.
    batch_shape: `TensorShape`. The batch shape of the resulting
      kernel input.
    example_dim: Optional Python int giving the size of each example dimension.
      If omitted, Hypothesis will choose one.
    example_ndims: Optional Python int giving the number of example dimensions
      of the input. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: If `False`, the returned parameters are all Tensors, never
      Variables or DeferredTensor.
    name: Name to give the variable.

  Returns:
    kernel_input: A strategy for drawing kernel_input with the prescribed shape
      (or an arbitrary one if omitted).
  """
  if example_ndims is None:
    example_ndims = draw(hps.integers(min_value=1, max_value=2))
  if example_dim is None:
    example_dim = draw(hps.integers(min_value=2, max_value=4))

  if feature_ndims is None:
    feature_ndims = draw(hps.integers(min_value=1, max_value=2))
  if feature_dim is None:
    feature_dim = draw(hps.integers(min_value=2, max_value=4))

  batch_shape = tensorshape_util.as_list(batch_shape)
  example_shape = [example_dim] * example_ndims
  feature_shape = [feature_dim] * feature_ndims

  batch_size = int(np.prod(batch_shape))
  example_size = example_dim ** example_ndims
  feature_size = feature_dim ** feature_ndims

  # We would like each batch of examples to be unique; duplicate examples make
  # the kernel matrix merely positive semi-definite (i.e. singular).
  # hypothesis.extra.numpy.arrays doesn't have a sense of tolerance, so we need
  # to do some extra work to get points sufficiently far from each other.
  grid_size = draw(hps.integers(min_value=10, max_value=100))
  grid_spacing = draw(hps.floats(min_value=1e-2, max_value=2))
  hp.note('Grid size {} and spacing {}'.format(grid_size, grid_spacing))

  def _grid_indices_to_values(grid_indices):
    return (grid_spacing *
            (np.array(grid_indices, dtype=np.float64) - np.float64(grid_size)))

  # We'll construct the result by stacking onto flattened batch, example and
  # feature dims, then reshape to unflatten at the end.
  result = np.zeros([0, example_size, feature_size])
  for _ in range(batch_size):
    seen = set()
    index_array_strategy = hps.tuples(
        *([hps.integers(0, grid_size + 1)] * feature_size)).filter(
            lambda x, seen=seen: x not in seen)  # Default param to sate pylint.
    examples = np.zeros([1, 0, feature_size])
    for _ in range(example_size):
      feature_grid_locations = draw(index_array_strategy)
      seen.add(feature_grid_locations)
      example = _grid_indices_to_values(feature_grid_locations)
      example = example[np.newaxis, np.newaxis, ...]
      examples = np.concatenate([examples, example], axis=1)
    result = np.concatenate([result, examples], axis=0)
  result = np.reshape(result, batch_shape + example_shape + feature_shape)

  if enable_vars and draw(hps.booleans()):
    result = tf.Variable(result, name=name)
    if draw(hps.booleans()):
      result = tfp_hps.defer_and_count_usage(result)
  return result
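
The seen-set-plus-`.filter` pattern above is plain Hypothesis and can be demonstrated without TensorFlow. A self-contained sketch (all names illustrative):

from hypothesis import given
import hypothesis.strategies as hps

@given(hps.data())
def test_drawn_grid_points_are_unique(data):
    seen = set()
    # `.filter` makes Hypothesis re-draw any index tuple already seen.
    index_strategy = hps.tuples(
        hps.integers(0, 10), hps.integers(0, 10)).filter(
            lambda t, seen=seen: t not in seen)
    points = []
    for _ in range(5):
        t = data.draw(index_strategy)
        seen.add(t)
        points.append(t)
    assert len(set(points)) == len(points)
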
コード例 #48
def test_shuffle_is_noop(xss, rnd):
    yss = list(xss)
    rnd.shuffle(yss)
    note("Shuffle: {0}".format(yss))
    assert xss == yss
コード例 #49
def base_kernels(
    draw,
    kernel_name=None,
    batch_shape=None,
    event_dim=None,
    feature_dim=None,
    feature_ndims=None,
    enable_vars=False):
  """Strategy for drawing kernels that don't depend on other kernels.

  Args:
    draw: Hypothesis function supplied by `@hps.composite`.
    kernel_name: Optional Python `str`.  If given, the produced kernels
      will all have this type.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
  Returns:
    kernels: A strategy for drawing Kernels with the specified `batch_shape`
      (or an arbitrary one if omitted).
    kernel_variable_names: List of kernel parameters that are variables.
  """

  if kernel_name is None:
    kernel_name = draw(hps.sampled_from(sorted(INSTANTIABLE_BASE_KERNELS)))
  if batch_shape is None:
    batch_shape = draw(tfp_hps.shapes())
  if event_dim is None:
    event_dim = draw(hps.integers(min_value=2, max_value=6))
  if feature_dim is None:
    feature_dim = draw(hps.integers(min_value=2, max_value=6))
  if feature_ndims is None:
    feature_ndims = draw(hps.integers(min_value=2, max_value=6))

  kernel_params = draw(
      broadcasting_params(kernel_name, batch_shape, event_dim=event_dim,
                          enable_vars=enable_vars))
  kernel_variable_names = [
      k for k in kernel_params if tensor_util.is_ref(kernel_params[k])]
  hp.note('Forming kernel {} with feature_ndims {} and constrained parameters '
          '{}'.format(kernel_name, feature_ndims, kernel_params))
  ctor = getattr(tfpk, kernel_name)
  result_kernel = ctor(
      validate_args=True,
      feature_ndims=feature_ndims,
      **kernel_params)
  if batch_shape != result_kernel.batch_shape:
    msg = ('Kernel strategy generated a bad batch shape '
           'for {}, should have been {}.').format(result_kernel, batch_shape)
    raise AssertionError(msg)
  return result_kernel, kernel_variable_names
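
A hedged usage sketch, under the same assumptions as above (`@hps.composite` decoration, TF2 eager mode, and the module's imports in scope). With `enable_vars=False`, no drawn parameter should be a Variable:

import tensorflow as tf
from hypothesis import given
import hypothesis.strategies as hps

@given(hps.data())
def test_base_kernel_batch_shape(data):  # illustrative name
    kernel, var_names = data.draw(base_kernels(
        batch_shape=tf.TensorShape([2, 3]), enable_vars=False))
    assert var_names == []  # no parameters are Variables in this mode
    assert kernel.batch_shape == [2, 3]
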
コード例 #50
 def update_long(self, length, random):
     buf = bytes((random.getrandbits(8) for _ in range(length)))
     note("update_long: %s" % buf)
     self._update(buf)
コード例 #51
def schur_complements(
    draw,
    batch_shape=None,
    event_dim=None,
    feature_dim=None,
    feature_ndims=None,
    enable_vars=None,
    depth=None):
  """Strategy for drawing `SchurComplement` kernels.

  The underlying kernel is drawn from the `kernels` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound kernel.

  Returns:
    kernels: A strategy for drawing `SchurComplement` kernels with the specified
      `batch_shape` (or an arbitrary one if omitted).
  """
  if depth is None:
    depth = draw(depths())
  if batch_shape is None:
    batch_shape = draw(tfp_hps.shapes())
  if event_dim is None:
    event_dim = draw(hps.integers(min_value=2, max_value=6))
  if feature_dim is None:
    feature_dim = draw(hps.integers(min_value=2, max_value=6))
  if feature_ndims is None:
    feature_ndims = draw(hps.integers(min_value=2, max_value=6))

  base_kernel, kernel_variable_names = draw(kernels(
      batch_shape=batch_shape,
      event_dim=event_dim,
      feature_dim=feature_dim,
      feature_ndims=feature_ndims,
      enable_vars=False,
      depth=depth-1))

  # SchurComplement requires the inputs to have one example dimension.
  fixed_inputs = draw(kernel_input(
      batch_shape=batch_shape,
      example_ndims=1,
      feature_dim=feature_dim,
      feature_ndims=feature_ndims))
  # Positive shift to ensure the divisor matrix is PD.
  diag_shift = np.float64(draw(hpnp.arrays(
      dtype=np.float64,
      shape=tensorshape_util.as_list(batch_shape),
      elements=hps.floats(1, 100, allow_nan=False, allow_infinity=False))))

  hp.note('Forming SchurComplement kernel with fixed_inputs: {} '
          'and diag_shift: {}'.format(fixed_inputs, diag_shift))

  schur_complement_params = {
      'fixed_inputs': fixed_inputs,
      'diag_shift': diag_shift
  }
  for param_name in schur_complement_params:
    if enable_vars and draw(hps.booleans()):
      kernel_variable_names.append(param_name)
      schur_complement_params[param_name] = tf.Variable(
          schur_complement_params[param_name], name=param_name)
      if draw(hps.booleans()):
        schur_complement_params[param_name] = tfp_hps.defer_and_count_usage(
            schur_complement_params[param_name])
  result_kernel = tfp.math.psd_kernels.SchurComplement(
      base_kernel=base_kernel,
      fixed_inputs=schur_complement_params['fixed_inputs'],
      diag_shift=schur_complement_params['diag_shift'],
      validate_args=True)
  return result_kernel, kernel_variable_names
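
The positive `diag_shift` is drawn because the divisor matrix built from `fixed_inputs` may be only positive semi-definite; a small NumPy sketch of why the shift helps:

import numpy as np

m = np.array([[1., 1.],
              [1., 1.]])        # PSD but singular (rank 1)
# np.linalg.cholesky(m) would raise LinAlgError here.
shifted = m + 0.5 * np.eye(2)   # eigenvalues 2.5 and 0.5: now PD
np.linalg.cholesky(shifted)     # succeeds
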
コード例 #52
def transformed_distributions(draw,
                              batch_shape=None,
                              event_dim=None,
                              enable_vars=False,
                              depth=None):
    """Strategy for drawing `TransformedDistribution`s.

  The transforming bijector is drawn from the
  `bijectors.hypothesis_testlib.unconstrained_bijectors` strategy.

  The underlying distribution is drawn from the `distributions` strategy, except
  that it must be compatible with the bijector according to
  `bijectors.hypothesis_testlib.distribution_filter_for` (these generally check
  that vector bijectors are not combined with scalar distributions, etc).

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `TransformedDistribution`.  The underlying distribution will sometimes
      have the same `batch_shape`, and sometimes have scalar batch shape.
      Hypothesis will pick a `batch_shape` if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound Distributions.

  Returns:
    dists: A strategy for drawing `TransformedDistribution`s with the specified
      `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())

    bijector = draw(bijector_hps.unconstrained_bijectors())
    hp.note(
        'Drawing TransformedDistribution with bijector {}'.format(bijector))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    underlying_batch_shape = batch_shape
    batch_shape_arg = None
    if draw(hps.booleans()):
        # Use batch_shape overrides.
        underlying_batch_shape = tf.TensorShape([])  # scalar underlying batch
        batch_shape_arg = batch_shape
    underlyings = distributions(
        batch_shape=underlying_batch_shape,
        event_dim=event_dim,
        enable_vars=enable_vars,
        depth=depth - 1).filter(bijector_hps.distribution_filter_for(bijector))
    to_transform = draw(underlyings)
    hp.note('Forming TransformedDistribution with '
            'underlying distribution {}; parameters {}'.format(
                to_transform, params_used(to_transform)))
    # TODO(bjp): Add test coverage for `event_shape` argument of
    # `TransformedDistribution`.
    result_dist = tfd.TransformedDistribution(bijector=bijector,
                                              distribution=to_transform,
                                              batch_shape=batch_shape_arg,
                                              validate_args=True)
    if batch_shape != result_dist.batch_shape:
        msg = ('TransformedDistribution strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_dist, batch_shape)
        raise AssertionError(msg)
    return result_dist
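
The `.filter(...)` call on `underlyings` above is ordinary Hypothesis strategy filtering; a self-contained illustration:

from hypothesis import given
import hypothesis.strategies as hps

@given(hps.integers(min_value=0, max_value=100).filter(lambda n: n % 2 == 0))
def test_only_even_values_are_drawn(n):
    assert n % 2 == 0
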
コード例 #53
def feature_scaleds(
    draw,
    batch_shape=None,
    event_dim=None,
    feature_dim=None,
    feature_ndims=None,
    enable_vars=None,
    depth=None):
  """Strategy for drawing `FeatureScaled` kernels.

  The underlying kernel is drawn from the `kernels` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound kernel.

  Returns:
    kernels: A strategy for drawing `FeatureScaled` kernels with the specified
      `batch_shape` (or an arbitrary one if omitted).
  """
  if depth is None:
    depth = draw(depths())
  if batch_shape is None:
    batch_shape = draw(tfp_hps.shapes())
  if event_dim is None:
    event_dim = draw(hps.integers(min_value=2, max_value=6))
  if feature_dim is None:
    feature_dim = draw(hps.integers(min_value=2, max_value=6))
  if feature_ndims is None:
    feature_ndims = draw(hps.integers(min_value=2, max_value=6))

  base_kernel, kernel_variable_names = draw(kernels(
      batch_shape=batch_shape,
      event_dim=event_dim,
      feature_dim=feature_dim,
      feature_ndims=feature_ndims,
      enable_vars=False,
      depth=depth-1))
  scale_diag = tfp_hps.softplus_plus_eps()(draw(kernel_input(
      batch_shape=batch_shape,
      example_ndims=0,
      feature_dim=feature_dim,
      feature_ndims=feature_ndims)))

  hp.note('Forming FeatureScaled kernel with scale_diag: {} '.format(
      scale_diag))

  if enable_vars and draw(hps.booleans()):
    kernel_variable_names.append('scale_diag')
    scale_diag = tf.Variable(scale_diag, name='scale_diag')
    # Don't enable variable counting. This is because rescaling is
    # done for each input, which will exceed two convert_to_tensor calls.
  result_kernel = tfp.math.psd_kernels.FeatureScaled(
      kernel=base_kernel,
      scale_diag=scale_diag,
      validate_args=True)
  return result_kernel, kernel_variable_names
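
For intuition about what this strategy constructs: `FeatureScaled` applies the base kernel to inputs divided elementwise by `scale_diag` (per-feature relevance scaling). A hedged TF2 eager-mode sketch:

import numpy as np
import tensorflow_probability as tfp

tfpk = tfp.math.psd_kernels
base = tfpk.ExponentiatedQuadratic()
ard = tfpk.FeatureScaled(base, scale_diag=np.array([1., 10.]),
                         validate_args=True)
# Scaling feature 1 by 10 makes a distance of 10 in that feature look like 1.
v = ard.apply(np.zeros([2]), np.array([0., 10.]))
w = base.apply(np.zeros([1]), np.array([1.]))
np.testing.assert_allclose(v, w, rtol=1e-6)
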
コード例 #54
def test_compare_geth_hevm(b):
    code = b.hex()
    note("code that caused failure: ")
    note(code)
    # prepopulate the stack a bit
    x = os.system(
        'evm --code ' + code +
        ' --gas 0xffffffffffffffff --json --receiver 0xacab --nomemory --prestate ./genesis.json run  > gethout'
    )
    y = os.system(
        'hevm exec --code ' + code +
        ' --gas 0xffffffffffffffff --chainid 0x539 --gaslimit 0xfffffffff --jsontrace --origin 0x73656e646572 --caller 0x73656e646572 > hevmout'
    )
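    # os.system returns the command's exit status, so this asserts that geth
    # and hevm agree on success/failure before their traces are compared.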
    assert x == y
    gethlines = open('gethout').read().split('\n')
    hevmlines = open('hevmout').read().split('\n')
    target(float(len(gethlines)))
    for i in range(len(hevmlines) - 3):
        gethline = gethlines[i]
        hevmline = hevmlines[i]
        hjson = json.loads(hevmline)
        gjson = json.loads(gethline)
        # Printed when the two traces diverge:
        note('')
        note('--- STEP ----')
        note('geth thinks that')
        note(gethline)
        note('while hevm believes')
        note(hevmline)
        note('')

        assert hjson['pc'] == gjson['pc']
        assert hjson['stack'] == gjson['stack']
        # we can't compare memsize for now because geth
        # measures memory and memsize after the instruction,
        # as opposed to all other fields...
        # assert hjson['memSize'] == gjson['memSize']
        assert hjson['gas'] == gjson['gas']
    gethres = json.loads(gethlines[len(gethlines) - 2])
    hevmres = json.loads(hevmlines[len(hevmlines) - 2])
    note('--- OUTPUT ----')
    note('geth thinks that')
    note(gethres)
    note('while hevm believes')
    note(hevmres)
    assert gethres['output'] == hevmres['output']
    assert gethres['gasUsed'] == hevmres['gasUsed']
コード例 #55
 def foo(x):
     if x > 11:
         note('Lo')
         failing[0] += 1
         assert False
コード例 #56
def base_distributions(draw,
                       dist_name=None,
                       batch_shape=None,
                       event_dim=None,
                       enable_vars=False,
                       eligibility_filter=lambda name: True):
    """Strategy for drawing arbitrary base Distributions.

  This does not draw compound distributions like `Independent`,
  `MixtureSameFamily`, or `TransformedDistribution`; only base Distributions
  that do not accept other Distributions as arguments.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    dist_name: Optional Python `str`.  If given, the produced distributions
      will all have this type.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Distribution.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    eligibility_filter: Optional Python callable.  Blacklists some Distribution
      class names so they will not be drawn at the top level.

  Returns:
    dists: A strategy for drawing Distributions with the specified `batch_shape`
      (or an arbitrary one if omitted).
  """
    if dist_name is None:
        names = [
            k for k in INSTANTIABLE_BASE_DISTS.keys() if eligibility_filter(k)
        ]
        dist_name = draw(hps.sampled_from(sorted(names)))

    if dist_name == 'Empirical':
        variants = [
            k for k in INSTANTIABLE_BASE_DISTS.keys()
            if eligibility_filter(k) and 'Empirical' in k
        ]
        dist_name = draw(hps.sampled_from(sorted(variants)))

    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())

    params_kwargs = draw(
        broadcasting_params(dist_name,
                            batch_shape,
                            event_dim=event_dim,
                            enable_vars=enable_vars))
    params_constrained = constraint_for(dist_name)(params_kwargs)
    hp.note('Forming dist {} with constrained parameters {}'.format(
        dist_name, params_constrained))
    assert_shapes_unchanged(params_kwargs, params_constrained)
    params_constrained['validate_args'] = True
    dist_cls = INSTANTIABLE_BASE_DISTS[dist_name].cls
    result_dist = dist_cls(**params_constrained)
    if batch_shape != result_dist.batch_shape:
        msg = ('Distributions strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_dist, batch_shape)
        raise AssertionError(msg)
    return result_dist
コード例 #57
def mixtures_same_family(draw,
                         batch_shape=None,
                         event_dim=None,
                         enable_vars=False,
                         depth=None):
    """Strategy for drawing `MixtureSameFamily` distributions.

  The component distribution is drawn from the `distributions` strategy.

  The Categorical mixture distributions are either shared across all batch
  members, or drawn independently for the full batch (as required by
  `MixtureSameFamily`).

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `MixtureSameFamily` distribution.  The component distribution will have a
      batch shape of 1 rank higher (for the components being mixed).  Hypothesis
      will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the component
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound Distributions.

  Returns:
    dists: A strategy for drawing `MixtureSameFamily` distributions with the
      specified `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())

    if batch_shape is None:
        # Ensure the components dist has at least one batch dim (a component dim).
        batch_shape = draw(tfp_hps.shapes(min_ndims=1, min_lastdimsize=2))
    else:  # This mixture adds a batch dim to its underlying components dist.
        batch_shape = tensorshape_util.concatenate(
            batch_shape,
            draw(tfp_hps.shapes(min_ndims=1, max_ndims=1, min_lastdimsize=2)))

    component = draw(
        distributions(batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars,
                      depth=depth - 1))
    hp.note(
        'Drawing MixtureSameFamily with component {}; parameters {}'.format(
            component, params_used(component)))
    # scalar or same-shaped categorical?
    mixture_batch_shape = draw(
        hps.one_of(hps.just(batch_shape[:-1]), hps.just(tf.TensorShape([]))))
    mixture_dist = draw(
        base_distributions(dist_name='Categorical',
                           batch_shape=mixture_batch_shape,
                           event_dim=tensorshape_util.as_list(batch_shape)[-1],
                           enable_vars=enable_vars))
    hp.note(('Forming MixtureSameFamily with '
             'mixture distribution {}; parameters {}').format(
                 mixture_dist, params_used(mixture_dist)))
    result_dist = tfd.MixtureSameFamily(components_distribution=component,
                                        mixture_distribution=mixture_dist,
                                        validate_args=True)
    if batch_shape[:-1] != result_dist.batch_shape:
        msg = ('MixtureSameFamily strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_dist,
                                                      batch_shape[:-1])
        raise AssertionError(msg)
    return result_dist
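
For reference, the shape contract this strategy exercises: the component distribution carries one extra trailing batch dimension (the components being mixed), which `MixtureSameFamily` consumes. A hedged TF2 eager-mode sketch:

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
# A batch of 3 mixtures, each mixing 4 Normal components.
mix = tfd.MixtureSameFamily(
    mixture_distribution=tfd.Categorical(logits=tf.zeros([3, 4])),
    components_distribution=tfd.Normal(loc=tf.zeros([3, 4]), scale=1.),
    validate_args=True)
assert mix.batch_shape == [3]  # the component dimension is mixed out
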
コード例 #58
    def _test_slicing(self, data, dist):
        strm = tfp_test_util.test_seed_stream()
        batch_shape = dist.batch_shape
        slices = data.draw(valid_slices(batch_shape))
        slice_str = 'dist[{}]'.format(', '.join(stringify_slices(slices)))
        # Make sure the slice string appears in Hypothesis' attempted example log
        hp.note('Using slice ' + slice_str)
        if not slices:  # Nothing further to check.
            return
        sliced_zeros = np.zeros(batch_shape)[slices]
        sliced_dist = dist[slices]

        # Check that slicing modifies batch shape as expected.
        self.assertAllEqual(sliced_zeros.shape, sliced_dist.batch_shape)

        if not sliced_zeros.size:
            # TODO(b/128924708): Fix distributions that fail on degenerate empty
            #     shapes, e.g. Multinomial, DirichletMultinomial, ...
            return

        # Check that sampling of sliced distributions executes.
        with no_tf_rank_errors():
            samples = self.evaluate(dist.sample(seed=strm()))
            sliced_samples = self.evaluate(sliced_dist.sample(seed=strm()))

        # Come up with the slices for samples (which must also include event dims).
        sample_slices = (tuple(slices) if isinstance(
            slices, collections.abc.Sequence) else (slices, ))
        if Ellipsis not in sample_slices:
            sample_slices += (Ellipsis, )
        sample_slices += tuple([slice(None)] *
                               tensorshape_util.rank(dist.event_shape))

        # Report sub-sliced samples (on which we compare log_prob) to hypothesis.
        hp.note('Sample(s) for testing log_prob ' +
                str(samples[sample_slices]))

        # Check that sampling a sliced distribution produces the same shape as
        # slicing the samples from the original.
        self.assertAllEqual(samples[sample_slices].shape, sliced_samples.shape)

        # Check that a sliced distribution can compute the log_prob of its own
        # samples (up to numerical validation errors).
        with no_tf_rank_errors():
            try:
                lp = self.evaluate(dist.log_prob(samples))
            except tf.errors.InvalidArgumentError:
                # TODO(b/129271256): d.log_prob(d.sample()) should not fail
                #     validate_args checks.
                # We only tolerate this case for the non-sliced dist.
                return
            sliced_lp = self.evaluate(
                sliced_dist.log_prob(samples[sample_slices]))

        # Check that the sliced dist's log_prob agrees with slicing the original's
        # log_prob.
        # TODO(b/128708201): Better numerics for Geometric/Beta?
        # Eigen can return quite different results for packet vs non-packet ops.
        # To work around this, we use a much larger rtol for the last 3
        # (assuming packet size 4) elements.
        packetized_lp = lp[slices].reshape(-1)[:-3]
        packetized_sliced_lp = sliced_lp.reshape(-1)[:-3]
        rtol = (0.1 if any(x in dist.name for x in ('Geometric', 'Beta',
                                                    'Dirichlet')) else 0.02)
        self.assertAllClose(packetized_lp, packetized_sliced_lp, rtol=rtol)
        possibly_nonpacket_lp = lp[slices].reshape(-1)[-3:]
        possibly_nonpacket_sliced_lp = sliced_lp.reshape(-1)[-3:]

        # TODO(b/266018543): Resolve nan disagreement between eigen vec/scalar paths
        hasnan = (np.isnan(possibly_nonpacket_lp)
                  | np.isnan(possibly_nonpacket_sliced_lp))
        possibly_nonpacket_lp = np.where(hasnan, 0, possibly_nonpacket_lp)
        possibly_nonpacket_sliced_lp = np.where(hasnan, 0,
                                                possibly_nonpacket_sliced_lp)
        self.assertAllClose(possibly_nonpacket_lp,
                            possibly_nonpacket_sliced_lp,
                            rtol=0.4,
                            atol=1e-4)
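
The `sample_slices` construction above (append `Ellipsis`, then one `slice(None)` per event dimension) keeps the user's slices acting on batch dimensions only. A self-contained NumPy sketch:

import numpy as np

batch_shape, event_shape = (4, 2), (3, 3)
samples = np.zeros(batch_shape + event_shape)
slices = (slice(1, 3),)  # user slices over the batch dimensions
sample_slices = slices + (Ellipsis,) + (slice(None),) * len(event_shape)
# Only the batch dimensions are sliced; event dimensions pass through.
assert samples[sample_slices].shape == (2, 2, 3, 3)
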
コード例 #59
def bijectors(draw,
              bijector_name=None,
              batch_shape=None,
              event_dim=None,
              enable_vars=False):
    """Strategy for drawing Bijectors.

  The emitted bijector may be a basic bijector or an `Invert` of a basic
  bijector, but not a compound like `Chain`.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    bijector_name: Optional Python `str`.  If given, the produced bijectors
      will all have this type.  If omitted, Hypothesis chooses one from
      the whitelist `TF2_FRIENDLY_BIJECTORS`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      bijector.  Hypothesis will pick one if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
      `tfp.util.TransformedVariable`}.

  Returns:
    bijectors: A strategy for drawing bijectors with the specified `batch_shape`
      (or an arbitrary one if omitted).
  """
    if bijector_name is None:
        bijector_name = draw(hps.sampled_from(TF2_FRIENDLY_BIJECTORS))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if bijector_name == 'Invert':
        underlying_name = draw(
            hps.sampled_from(sorted(set(TF2_FRIENDLY_BIJECTORS) - {'Invert'})))
        underlying = draw(
            bijectors(bijector_name=underlying_name,
                      batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars))
        bijector_params = {'bijector': underlying}
        msg = 'Forming Invert bijector with underlying bijector {}.'
        hp.note(msg.format(underlying))
    elif bijector_name == 'TransformDiagonal':
        underlying_name = draw(
            hps.sampled_from(sorted(TRANSFORM_DIAGONAL_WHITELIST)))
        underlying = draw(
            bijectors(bijector_name=underlying_name,
                      batch_shape=(),
                      event_dim=event_dim,
                      enable_vars=enable_vars))
        bijector_params = {'diag_bijector': underlying}
        msg = 'Forming TransformDiagonal bijector with underlying bijector {}.'
        hp.note(msg.format(underlying))
    elif bijector_name == 'Inline':
        scale = draw(
            tfp_hps.maybe_variable(
                hps.sampled_from(np.float32([1., -1., 2, -2.])), enable_vars))
        b = tfb.Scale(scale=scale)

        bijector_params = dict(
            forward_fn=CallableModule(b.forward, b),
            inverse_fn=b.inverse,
            forward_log_det_jacobian_fn=lambda x: b.forward_log_det_jacobian(  # pylint: disable=g-long-lambda
                x,
                event_ndims=b.forward_min_event_ndims),
            forward_min_event_ndims=b.forward_min_event_ndims,
            is_constant_jacobian=b.is_constant_jacobian,
            is_increasing=b._internal_is_increasing,  # pylint: disable=protected-access
        )
    elif bijector_name == 'DiscreteCosineTransform':
        dct_type = hps.integers(min_value=2, max_value=3)
        bijector_params = {'dct_type': draw(dct_type)}
    elif bijector_name == 'PowerTransform':
        power = hps.floats(min_value=1e-6, max_value=10.)
        bijector_params = {'power': draw(power)}
    elif bijector_name == 'Permute':
        event_ndims = draw(hps.integers(min_value=1, max_value=2))
        axis = hps.integers(min_value=-event_ndims, max_value=-1)
        # This is a permutation of dimensions within an axis.
        # (Contrast with `Transpose` below.)
        bijector_params = {
            'axis':
            draw(axis),
            'permutation':
            draw(
                tfp_hps.maybe_variable(hps.permutations(np.arange(event_dim)),
                                       enable_vars,
                                       dtype=tf.int32))
        }
    elif bijector_name == 'Reshape':
        event_shape_out = draw(tfp_hps.shapes(min_ndims=1))
        # TODO(b/142135119): Wanted to draw general input and output shapes like the
        # following, but Hypothesis complained about filtering out too many things.
        # event_shape_in = draw(tfp_hps.shapes(min_ndims=1))
        # hp.assume(event_shape_out.num_elements() == event_shape_in.num_elements())
        event_shape_in = [event_shape_out.num_elements()]
        bijector_params = {
            'event_shape_out': event_shape_out,
            'event_shape_in': event_shape_in
        }
    elif bijector_name == 'Transpose':
        event_ndims = draw(hps.integers(min_value=0, max_value=2))
        # This is a permutation of axes.
        # (Contrast with `Permute` above.)
        bijector_params = {
            'perm': draw(hps.permutations(np.arange(event_ndims)))
        }
    else:
        bijector_params = draw(
            broadcasting_params(bijector_name,
                                batch_shape,
                                event_dim=event_dim,
                                enable_vars=enable_vars))
    ctor = getattr(tfb, bijector_name)
    return ctor(validate_args=True, **bijector_params)
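
A quick illustration of the `Invert` case drawn above: wrapping a bijector in `tfb.Invert` swaps its forward and inverse directions. Hedged TF2 eager-mode sketch:

import tensorflow_probability as tfp

tfb = tfp.bijectors
inv_exp = tfb.Invert(tfb.Exp(), validate_args=True)
# forward of Invert(Exp) is log, so forward(e) should be 1.
assert abs(float(inv_exp.forward(2.718281828459045)) - 1.0) < 1e-6
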
コード例 #60
def base_distributions(draw,
                       dist_name=None,
                       batch_shape=None,
                       event_dim=None,
                       enable_vars=False,
                       eligibility_filter=lambda name: True,
                       validate_args=True):
    """Strategy for drawing arbitrary base Distributions.

  This does not draw compound distributions like `Independent`,
  `MixtureSameFamily`, or `TransformedDistribution`; only base Distributions
  that do not accept other Distributions as arguments.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    dist_name: Optional Python `str`.  If given, the produced distributions
      will all have this type.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Distribution.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
      `tfp.util.TransformedVariable`}.
    eligibility_filter: Optional Python callable.  Blacklists some Distribution
      class names so they will not be drawn at the top level.
    validate_args: Python `bool`; whether to enable runtime assertions.

  Returns:
    dists: A strategy for drawing Distributions with the specified `batch_shape`
      (or an arbitrary one if omitted).
  """
    if dist_name is None:
        names = [
            k for k in INSTANTIABLE_BASE_DISTS.keys() if eligibility_filter(k)
        ]
        dist_name = draw(hps.sampled_from(sorted(names)))

    if dist_name == 'Empirical':
        variants = [
            k for k in INSTANTIABLE_BASE_DISTS.keys()
            if eligibility_filter(k) and 'Empirical' in k
        ]
        dist_name = draw(hps.sampled_from(sorted(variants)))

    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())

    # Draw raw parameters
    params_kwargs = draw(
        broadcasting_params(dist_name,
                            batch_shape,
                            event_dim=event_dim,
                            enable_vars=enable_vars))
    hp.note('Forming dist {} with raw parameters {}'.format(
        dist_name, params_kwargs))

    # Constrain them to legal values
    params_constrained = constraint_for(dist_name)(params_kwargs)

    # Sometimes the "distribution constraint" fn may replace c2t-tracking
    # DeferredTensor params with Tensor params (e.g. fix_triangular). In such
    # cases, we preserve the c2t-tracking DeferredTensors by wrapping them but
    # ignoring the value.  We similarly reinstate raw tf.Variables, so they
    # appear in the distribution's `variables` list and can be initialized.
    for k in params_constrained:
        if (k in params_kwargs
                and isinstance(params_kwargs[k],
                               (tfp_util.DeferredTensor, tf.Variable))
                and params_kwargs[k] is not params_constrained[k]):

            def constrained_value(v, val=params_constrained[k]):
                # While the gradient to v will be 0, we only care about the c2t counts.
                return v * 0 + val

            params_constrained[k] = tfp_util.DeferredTensor(
                params_kwargs[k], constrained_value)

    hp.note('Forming dist {} with constrained parameters {}'.format(
        dist_name, params_constrained))
    assert_shapes_unchanged(params_kwargs, params_constrained)
    params_constrained['validate_args'] = validate_args

    if dist_name in ['Wishart', 'WishartTriL']:
        # With the default `input_output_cholesky = False`, Wishart occasionally
        # produces samples for which the Cholesky decompositions fail, causing
        # an error in testDistribution when `log_prob` is called on a sample.
        params_constrained['input_output_cholesky'] = True

    # Actually construct the distribution
    dist_cls = INSTANTIABLE_BASE_DISTS[dist_name].cls
    result_dist = dist_cls(**params_constrained)

    # Check that the batch shape came out as expected
    if batch_shape != result_dist.batch_shape:
        msg = ('Distributions strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_dist, batch_shape)
        raise AssertionError(msg)
    return result_dist
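
A hedged standalone sketch of the DeferredTensor re-wrapping trick above: replace a parameter's value while keeping reads tied to the original variable, so variable tracking and convert-to-tensor counting still see it. Assumes TF2 eager mode and tensorflow_probability:

import tensorflow as tf
import tensorflow_probability as tfp

raw = tf.Variable(-3.0, name='raw_scale')  # unconstrained variable
constrained = tf.nn.softplus(raw) + 1e-3   # a plain, detached Tensor
# Reading `scale` reads `raw` (via v * 0) but yields the constrained value,
# so `raw` still appears in the distribution's variables.
scale = tfp.util.DeferredTensor(
    raw, lambda v, val=constrained: v * 0 + val)
dist = tfp.distributions.Normal(loc=0., scale=scale)
assert any(v is raw for v in dist.trainable_variables)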