Code example #1
def test_2d_compare_with_numpy(size, nx, xmin, xmax, ny, ymin, ymax):

    if xmax <= xmin or ymax <= ymin:
        return

    x = arrays(np.float, size, elements=st.floats(-1000, 1000)).example()
    y = arrays(np.float, size, elements=st.floats(-1000, 1000)).example()

    try:
        reference = np.histogram2d(x, y, bins=(nx, ny),
                                   range=((xmin, xmax), (ymin, ymax)))[0]
    except:
        # If Numpy fails, we skip the comparison since this isn't our fault
        return

    # First, check the Numpy result because it sometimes doesn't make sense. See
    # bug report https://github.com/numpy/numpy/issues/9435
    n_inside = np.sum((x <= xmax) & (x >= xmin) & (y <= ymax) & (y >= ymin))
    if n_inside != np.sum(reference):
        return

    fast = histogram2d(x, y, bins=(nx, ny),
                       range=((xmin, xmax), (ymin, ymax)))

    print(x, y, nx, xmin, xmax, ny, ymin, ymax)

    np.testing.assert_equal(fast, reference)
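The @given decorator that drives this test is not part of the snippet above; a hypothetical driver is sketched below, with strategies and bounds that are illustrative assumptions rather than the original project's settings.

import hypothesis.strategies as st
from hypothesis import given

# Hypothetical sketch only: the parameter strategies are assumptions.
@given(size=st.integers(0, 200),
       nx=st.integers(1, 10),
       xmin=st.floats(-1000, 1000), xmax=st.floats(-1000, 1000),
       ny=st.integers(1, 10),
       ymin=st.floats(-1000, 1000), ymax=st.floats(-1000, 1000))
def test_2d_compare_with_numpy_sketch(size, nx, xmin, xmax, ny, ymin, ymax):
    ...  # the body would be the comparison shown above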
Code example #2
File: testing.py  Project: Autoplectic/dit
def markov_chains(draw, alphabets=((2, 4), (2, 4), (2, 4))):
    """
    Generate Markov chains for use with hypothesis.

    Parameters
    ----------
    draw : function
        A sampling function passed in by hypothesis.
    alphabets : int, tuple of ints, tuple of pairs of ints
        If an int, it is the length of the chain and each variable is assumed to be binary.
        If a tuple of ints, the ints are assumed to be the size of each variable. If a tuple
        of pairs of ints, each pair represents the min and max alphabet size of each variable.

    Returns
    -------
    dist : Distribution
        A Markov chain with variable sizes.
    """
    try:
        len(alphabets)
        try:
            len(alphabets[0])
        except TypeError:
            alphabets = tuple((alpha, alpha) for alpha in alphabets)
    except TypeError:
        alphabets = ((2, 2),)*alphabets

    alphabets = [int(draw(integers(*alpha))) for alpha in alphabets]

    px = draw(arrays(np.float, shape=alphabets[0], elements=floats(0, 1)))
    cds = [draw(arrays(np.float, shape=(a, b), elements=floats(0, 1))) for a, b in pairwise(alphabets)]

    # assume things
    assume(px.sum() > 0)
    for cd in cds:
        for row in cd:
            assume(row.sum() > 0)

    px /= px.sum()

    # construct dist
    for cd in cds:
        cd /= cd.sum(axis=1, keepdims=True)
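        # `colon` is assumed to be an alias for slice(None), defined elsewhere in the module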
        slc = (np.newaxis,)*(len(px.shape)-1) + (colon, colon)
        px = px[..., np.newaxis] * cd[slc]

    dist = Distribution.from_ndarray(px)
    dist.normalize()
    return dist
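A hypothetical consumer of the strategy above, assuming it is wrapped with hypothesis's @st.composite (as its draw parameter suggests) and that dit's Distribution exposes its probabilities as a pmf array:

from hypothesis import given

@given(dist=markov_chains(alphabets=((2, 3), (2, 3), (2, 3))))
def test_markov_chain_pmf_is_normalized(dist):
    # normalize() is called inside the strategy, so the pmf should sum to 1
    assert abs(dist.pmf.sum() - 1.0) < 1e-9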
Code example #3
def _inputs(draw):
    rows_num = draw(st.integers(1, 100))
    index_num = draw(st.integers(1, 10))
    batch_size = draw(st.integers(2, 10))
    return (
        draw(hnp.arrays(
            np.float32,
            (batch_size, rows_num, 2),
            elements=st.floats(-10.0, 10.0),
        )),
        draw(hnp.arrays(
            np.int32,
            (index_num, 1),
            elements=st.integers(0, rows_num - 1),
        )),
    )
Code example #4
File: testing_hypothesis.py  Project: dwhoman/CVPI
def np_images(draw,
              number,
              width=st.integers(min_img_width, max_img_width).example(),
              height=st.integers(min_img_height, max_img_height).example()):
    return draw(st.lists(hnp.arrays(np.uint32, (width,height),
                                    elements=st.integers(0,max_uint32)),
                         min_size=number, max_size=number))
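A variant sketch that draws the image size per test case instead of freezing it with .example() at import time. It assumes the same module-level bounds (min_img_width, max_img_width, min_img_height, max_img_height, max_uint32) used above; the name np_images_drawn is illustrative only.

import numpy as np
import hypothesis.strategies as st
import hypothesis.extra.numpy as hnp

@st.composite
def np_images_drawn(draw, number):
    # Width and height are drawn per example rather than fixed at import time.
    width = draw(st.integers(min_img_width, max_img_width))
    height = draw(st.integers(min_img_height, max_img_height))
    return draw(st.lists(hnp.arrays(np.uint32, (width, height),
                                    elements=st.integers(0, max_uint32)),
                         min_size=number, max_size=number))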
Code example #5
def flat_tri(draw):
  """
  Triangle in the xy plane in general position.
  """
  tri = draw(arrays(np.int32, (3, 2),
                    elements=st.integers(min_value=-1000, max_value=1000)))

  x1 = draw(st.integers(min_value=-1000, max_value=1000))
  x2 = draw(st.integers(min_value=-1000, max_value=1000))
  x3 = draw(st.integers(min_value=-1000, max_value=1000))

  assume(not x1 == x2)
  assume(not x2 == x3)
  assume(not x1 == x3)

  y1 = draw(st.integers(min_value=-1000, max_value=1000))
  y2 = draw(st.integers(min_value=-1000, max_value=1000))
  y3 = draw(st.integers(min_value=-1000, max_value=1000))

  assume(not y1 == y2)
  assume(not y2 == y3)
  assume(not y1 == y3)

  tri = np.array([[x1, y1, 1],
                  [x2, y2, 1],
                  [x3, y3, 1]], dtype=np.float32)

  assume(not check_line(tri[0], tri[1], tri[2]))

  assume(not np.all(tri[0] == tri[1]))
  assume(not np.all(tri[0] == tri[2]))
  assume(not np.all(tri[1] == tri[2]))

  return tri
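A minimal usage sketch for the strategy above, assuming flat_tri is wrapped with @st.composite:

from hypothesis import given

@given(tri=flat_tri())
def test_flat_tri_has_homogeneous_coordinates(tri):
    # Three vertices, each (x, y, 1), as constructed above.
    assert tri.shape == (3, 3)
    assert (tri[:, 2] == 1).all()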
Code example #6
File: test_gen_data.py  Project: sunito/hypothesis
def test_may_fill_with_nan_when_unique_is_set():
    find_any(
        nps.arrays(
            dtype=float, elements=st.floats(allow_nan=False), shape=10,
            unique=True, fill=st.just(float('nan'))),
        lambda x: np.isnan(x).any()
    )
Code example #7
File: test_gen_data.py  Project: sunito/hypothesis
def test_can_minimize_large_arrays():
    x = minimal(
        nps.arrays(u'uint32', 100), lambda x: np.any(x) and not np.all(x),
        timeout_after=60
    )
    assert np.logical_or(x == 0, x == 1).all()
    assert np.count_nonzero(x) in (1, len(x) - 1)
Code example #8
File: test_gen_data.py  Project: sunito/hypothesis
def test_infer_strategy_from_dtype(dtype, data):
    # Given a dtype
    assert isinstance(dtype, np.dtype)
    # We can infer a strategy
    strat = nps.from_dtype(dtype)
    assert isinstance(strat, SearchStrategy)
    # And use it to fill an array of that dtype
    data.draw(nps.arrays(dtype, 10, strat))
Code example #9
File: test_gen_data.py  Project: sunito/hypothesis
def test_is_still_unique_with_nan_fill():
    @given(nps.arrays(
           dtype=float, elements=st.floats(allow_nan=False), shape=10,
           unique=True, fill=st.just(float('nan'))))
    def test(xs):
        assert len(set(xs)) == len(xs)

    test()
Code example #10
File: test_gen_data.py  Project: sunito/hypothesis
def test_may_not_fill_with_non_nan_when_unique_is_set():
    @given(nps.arrays(
        dtype=float, elements=st.floats(allow_nan=False), shape=10,
        unique=True, fill=st.just(0.0)))
    def test(arr):
        pass

    with pytest.raises(InvalidArgument):
        test()
Code example #11
File: test_gen_data.py  Project: sunito/hypothesis
def test_may_not_fill_with_non_nan_when_unique_is_set_and_type_is_not_number():
    @given(nps.arrays(
        dtype=bytes, shape=10,
        unique=True, fill=st.just(b'')))
    def test(arr):
        pass

    with pytest.raises(InvalidArgument):
        test()
Code example #12
File: impl.py  Project: rboulton/hypothesis
    def result(draw):
        index = draw(index_strategy)

        if len(index) > 0:
            if dtype is not None:
                result_data = draw(npst.arrays(
                    dtype=dtype, elements=elements, shape=len(index),
                    fill=fill, unique=unique,
                ))
            else:
                result_data = list(draw(npst.arrays(
                    dtype=object, elements=elements, shape=len(index),
                    fill=fill, unique=unique,
                )))

            return pandas.Series(
                result_data, index=index, dtype=dtype
            )
        else:
            return pandas.Series(
                (), index=index,
                dtype=dtype if dtype is not None else draw(
                    dtype_for_elements_strategy(elements)))
Code example #13
def id_list_batch(draw):
    num_inputs = draw(st.integers(1, 3))
    batch_size = draw(st.integers(5, 10))
    values_dtype = draw(st.sampled_from([np.int32, np.int64]))
    inputs = []
    for _ in range(num_inputs):
        size = draw(st.integers(5, 10))
        values = draw(hnp.arrays(values_dtype, size, st.integers(1, 10)))
        lengths = draw(hu.lengths(len(values),
                                  min_segments=batch_size,
                                  max_segments=batch_size))
        inputs.append(lengths)
        inputs.append(values)
    return inputs
Code example #14
File: strategies.py  Project: gelavizh1/thinc
def arrays_OPFI_BI_lengths(max_B=5, max_P=3, max_F=5, max_I=8):
    shapes = tuples(
                lengths(hi=max_B),
                lengths(hi=max_P),
                lengths(hi=max_F),
                lengths(hi=max_I),
                arrays('int32', shape=(5,),
                    elements=integers(min_value=1, max_value=10)))

    strat = shapes.flatmap(
        lambda opfi_lengths: tuples(
            ndarrays_of_shape(opfi_lengths[:-1]),
            ndarrays_of_shape((sum(opfi_lengths[-1]), opfi_lengths[-2])),
            just(opfi_lengths[-1])))
    return strat
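The key move above is flatmap: the lengths are drawn first, and the array strategies are then built from those drawn values. A self-contained sketch of the same pattern (all names here are illustrative, not from the project):

import numpy as np
import hypothesis.strategies as st
from hypothesis import given
from hypothesis.extra.numpy import arrays

# Draw a length n first, then generate an (n, 3) array whose shape depends on it.
length_dependent_arrays = st.integers(1, 10).flatmap(
    lambda n: arrays(np.int32, (n, 3), elements=st.integers(0, 5)))

@given(length_dependent_arrays)
def test_shape_follows_drawn_length(a):
    assert 1 <= a.shape[0] <= 10
    assert a.shape[1] == 3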
Code example #15
def test_1d_compare_with_numpy(size, nx, xmin, xmax):

    if xmax <= xmin:
        return

    x = arrays(np.float, size, elements=st.floats(-1000, 1000)).example()

    reference = np.histogram(x, bins=nx, range=(xmin, xmax))[0]

    # First, check the Numpy result because it sometimes doesn't make sense. See
    # bug report https://github.com/numpy/numpy/issues/9435
    n_inside = np.sum((x <= xmax) & (x >= xmin))
    if n_inside != np.sum(reference):
        return

    fast = histogram1d(x, bins=nx, range=(xmin, xmax))

    np.testing.assert_equal(fast, reference)
Code example #16
File: testing.py  Project: Autoplectic/dit
def distributions(draw, alphabets=(2, 2, 2), nondegenerate=False):
    """
    Generate distributions for use with hypothesis.

    Parameters
    ----------
    draw : function
        A sampling function passed in by hypothesis.
    alphabets : int, tuple of ints, tuple of pairs of ints
        If an int, it is the length of the outcomes and each variable is assumed to be binary.
        If a tuple of ints, the ints are assumed to be the size of each variable. If a tuple
        of pairs of ints, each pair represents the min and max alphabet size of each variable.

    Returns
    -------
    dist : Distribution
        A distribution with variable sizes.
    """
    try:
        len(alphabets)
        try:
            len(alphabets[0])
        except TypeError:
            alphabets = tuple((alpha, alpha) for alpha in alphabets)
    except TypeError:
        alphabets = ((2, 2),)*alphabets

    alphabets = [int(draw(integers(*alpha))) for alpha in alphabets]

    pmf = draw(arrays(np.float, shape=alphabets, elements=floats(0, 1)))

    assume(pmf.sum() > 0)

    if nondegenerate:
        axes = set(range(len(alphabets)))
        for axis, _ in enumerate(alphabets):
            assume(np.all(pmf.sum(axis=tuple(axes-set([axis]))) > 1e-6))

    pmf /= pmf.sum()

    dist = Distribution.from_ndarray(pmf)
    dist.normalize()
    return dist
Code example #17
File: test_filter.py  Project: moneytech/giotto-tda
import numpy as np
from hypothesis import given
from hypothesis.extra.numpy import array_shapes, arrays
from hypothesis.strategies import integers, floats
from numpy.testing import assert_almost_equal
from scipy.spatial.distance import pdist, squareform

from gtda.mapper import Eccentricity, Entropy, Projection
from gtda.mapper.utils._list_feature_union import ListFeatureUnion
from gtda.mapper.utils.decorators import method_to_transform

from sklearn.neighbors import KernelDensity


@given(X=arrays(dtype=np.float,
                elements=floats(allow_nan=False, allow_infinity=False),
                shape=array_shapes(min_dims=2, max_dims=2)),
       exponent=integers(min_value=1, max_value=100))
def test_eccentricity_shape_equals_number_of_samples(X, exponent):
    """Verify that eccentricity preserves the nb of samples in the input."""
    eccentricity = Eccentricity(exponent=exponent)
    Xt = eccentricity.fit_transform(X)
    assert Xt.shape == (len(X), 1)


@given(X=arrays(dtype=np.float,
                elements=floats(allow_nan=False, allow_infinity=False),
                shape=array_shapes(min_dims=2, max_dims=2)))
def test_eccentricity_values_with_infinity_norm_equals_max_row_values(X):
    eccentricity = Eccentricity(exponent=np.inf)
    Xt = eccentricity.fit_transform(X)
Code example #18
File: numpy_test.py  Project: kdeleeuw11/probability
def array_and_diagonal(draw):
    side = draw(hps.integers(1, 10))
    shape = draw(shapes(min_dims=2, min_side=side, max_side=side))
    array = draw(hnp.arrays(np.float64, shape, elements=floats()))
    diag = draw(hnp.arrays(np.float64, shape[:-1], elements=floats()))
    return array, diag
Code example #19
class Test_non_max_suppression:
    """ Ensure that non-maximum suppression (NMS) correctly suppresses expected values. """
    @given(
        boxes=hnp.arrays(
            dtype=float,
            shape=st.tuples(st.integers(0, 100), st.just(5)),
            elements=st.floats(1e-05, 100),
            unique=True,
        ),
        data=st.data(),
    )
    def test_shapes(self, boxes: ndarray, data: st.SearchStrategy):
        """ Ensure that non_max_suppression produces the correct shape output, even for empty inputs. """
        scores = boxes[:, 4]
        boxes = boxes[:, :4].cumsum(
            axis=1)  # ensures no 0-width or -height boxes

        N = scores.shape[0]
        nms = non_max_suppression(boxes, scores)

        assert (
            nms.shape[0] <= N
        )  # we're suppressing, so we can never end up with more things than we started with
        assert nms.ndim == 1

    def test_empty(self):
        """ Ensure that non_max_suppression works correctly with zero detections. """
        x = np.empty((0, 4))
        scores = np.empty((0, ))
        nms = non_max_suppression(x, scores)
        msg = "non_max_suppression failed to produce the expected output for zero detections"
        assert nms.shape == (0, ), msg

    @given(
        x=hnp.arrays(dtype=float, shape=(1, 4), elements=st.floats(1e-05,
                                                                   100)),
        score=st.floats(0, 1),
        rep=st.integers(2, 100),
    )
    def test_identical(self, x, score, rep):
        """ Ensure that non_max_suppression works correctly for identical boxes and that ordering doesn't matter. """
        x = x.cumsum(axis=1)
        x = x.repeat(rep).reshape(x.shape[1], rep).T
        score = np.array([score] * rep)
        idx = np.random.randint(len(x))
        score[idx] = 1000

        nms = non_max_suppression(x, score)
        msg = "non_max_suppression failed to produce the expected output when all detections are identical"
        assert_array_equal(nms, np.array([idx]), msg)

        nms = non_max_suppression(x, score, threshold=1)
        msg = "non_max_suppression failed to produce the expected output for identical detections with threshold 1"
        assert_array_equal(nms, np.array(range(len(x))), msg)

    @given(
        x=hnp.arrays(dtype=float, shape=(1, 4), elements=st.floats(1e-05,
                                                                   100)),
        score=st.floats(0, 1),
    )
    def test_single_detections(self, x: ndarray, score):
        """ Ensure that a single detection is not suppressed. """
        nms = non_max_suppression(x, np.array([score]))
        msg = "non_max_suppression failed to produce the expected output for a single detection"
        assert_array_equal(nms, np.array([0]), msg)

    @pytest.mark.parametrize(
        ("threshold", "desired_nms"),
        [(0.5, np.array([0, 1])), (0.25, np.array([0, 1])),
         (0.15, np.array([1]))],
    )
    def test_known_results(self, threshold, desired_nms):
        """ Ensures that non_max_suppression works correctly for known values. """
        boxes = np.array([[0, 0, 1, 1], [0.5, 0.5, 0.9, 0.9]])
        scores = np.array([0, 1])

        actual_nms = non_max_suppression(boxes, scores, threshold=threshold)
        assert_array_equal(actual_nms, desired_nms)
Code example #20
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER

from __future__ import division, print_function, absolute_import

import hypothesis.strategies as st
from hypothesis import given
from tests.common.debug import minimal, find_any
from hypothesis.extra.numpy import arrays


@given(arrays(object, 100, st.lists(max_size=0)))
def test_generated_lists_are_distinct(ls):
    assert len(set(map(id, ls))) == len(ls)


@st.composite
def distinct_integers(draw):
    used = draw(st.shared(st.builds(set), key='distinct_integers.used'))
    i = draw(st.integers(0, 2**64 - 1).filter(lambda x: x not in used))
    used.add(i)
    return i


@given(arrays('uint64', 10, distinct_integers()))
def test_does_not_reuse_distinct_integers(arr):
    assert len(set(arr)) == len(arr)
Code example #21
    mygrad_func=var,
    true_func=_var,
    num_arrays=1,
    kwargs=dict(axis=partial(axis_arg, min_dim=1),
                keepdims=keepdims_arg,
                ddof=ddof_arg),
    vary_each_element=True,
    index_to_bnds={0: (-10, 10)},
)
def test_var_bkwd():
    pass


@given(x=hnp.arrays(
    dtype=np.float,
    shape=hnp.array_shapes(),
    elements=st.floats(allow_infinity=False, allow_nan=False),
))
def test_var_no_axis_fwd(x):
    import mygrad as mg

    x = mg.Tensor(x, constant=False)
    o = mg.var(x, axis=())
    assert np.all(o.data == np.zeros_like(x.data))


@given(x=hnp.arrays(
    dtype=np.float,
    shape=hnp.array_shapes(),
    elements=st.floats(allow_infinity=False, allow_nan=False),
))
Code example #22
File: strategies.py  Project: grundprinzip/arrow-1
def arrays(draw, type, size=None):
    if isinstance(type, st.SearchStrategy):
        type = draw(type)
    elif not isinstance(type, pa.DataType):
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    shape = (size, )

    if pa.types.is_list(type) or pa.types.is_large_list(type):
        offsets = draw(npst.arrays(np.uint8(), shape=shape)).cumsum() // 20
        offsets = np.insert(offsets, 0, 0, axis=0)  # prepend with zero
        values = draw(arrays(type.value_type, size=int(offsets.sum())))
        array_type = (pa.LargeListArray
                      if pa.types.is_large_list(type) else pa.ListArray)
        return array_type.from_arrays(offsets, values)

    if pa.types.is_struct(type):
        h.assume(len(type) > 0)
        fields, child_arrays = [], []
        for field in type:
            fields.append(field)
            child_arrays.append(draw(arrays(field.type, size=size)))
        return pa.StructArray.from_arrays(child_arrays, fields=fields)

    if (pa.types.is_boolean(type) or pa.types.is_integer(type)
            or pa.types.is_floating(type)):
        values = npst.arrays(type.to_pandas_dtype(), shape=(size, ))
        np_arr = draw(values)
        if pa.types.is_floating(type):
            # Workaround ARROW-4952: no easy way to assert array equality
            # in a NaN-tolerant way.
            np_arr[np.isnan(np_arr)] = -42.0
        return pa.array(np_arr, type=type)

    if pa.types.is_null(type):
        value = st.none()
    elif pa.types.is_time(type):
        value = st.times()
    elif pa.types.is_date(type):
        value = st.dates()
    elif pa.types.is_timestamp(type):
        tz = pytz.timezone(type.tz) if type.tz is not None else None
        value = st.datetimes(timezones=st.just(tz))
    elif pa.types.is_duration(type):
        value = st.timedeltas()
    elif pa.types.is_binary(type) or pa.types.is_large_binary(type):
        value = st.binary()
    elif pa.types.is_string(type) or pa.types.is_large_string(type):
        value = st.text()
    elif pa.types.is_decimal(type):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    else:
        raise NotImplementedError(type)

    values = st.lists(value, min_size=size, max_size=size)
    return pa.array(draw(values), type=type)
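A hypothetical usage of the composite above, assuming it is decorated with @st.composite and that the module imports hypothesis as h and pyarrow as pa, as the body suggests:

@h.given(arr=arrays(pa.int64(), size=5))
def test_int64_arrays_have_requested_size(arr):
    assert isinstance(arr, pa.Array)
    assert len(arr) == 5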
Code example #23
File: test_fill_values.py  Project: vlulla/hypothesis
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER

from hypothesis import given, strategies as st
from hypothesis.extra.numpy import arrays

from tests.common.debug import find_any, minimal


@given(arrays(object, 100, elements=st.builds(list)))
def test_generated_lists_are_distinct(ls):
    assert len(set(map(id, ls))) == len(ls)


@st.composite
def distinct_integers(draw):
    used = draw(st.shared(st.builds(set), key="distinct_integers.used"))
    i = draw(st.integers(0, 2**64 - 1).filter(lambda x: x not in used))
    used.add(i)
    return i


@given(arrays("uint64", 10, elements=distinct_integers()))
def test_does_not_reuse_distinct_integers(arr):
    assert len(set(arr)) == len(arr)
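Because both draws in a single test case filter against the same st.shared set, two separate draws from distinct_integers() can never collide; a small sketch of that property, reusing the imports and composite above:

@given(a=distinct_integers(), b=distinct_integers())
def test_two_draws_never_collide(a, b):
    assert a != b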
Code example #24
File: utils.py  Project: woshahua/eli5
def rnd_len_arrays(dtype, min_len=0, max_len=3, elements=None):
    """ Generate numpy arrays of random length """
    lengths = integers(min_value=min_len, max_value=max_len)
    return lengths.flatmap(lambda n: arrays(dtype, n, elements=elements))
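A hypothetical usage of the helper above:

import numpy as np
from hypothesis import given

@given(rnd_len_arrays(np.float64, min_len=1, max_len=3))
def test_generated_length_is_within_bounds(arr):
    assert 1 <= len(arr) <= 3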
Code example #25
File: test_polygon.py  Project: msemelman/coxeter
    verts = points[hull.vertices]
    poly = polygon_from_hull(points[hull.vertices])
    assert np.all(poly.vertices[:, :2] == verts)


@settings(deadline=500)
@given(EllipseSurfaceStrategy)
@example(np.array([[1, 1], [1, 1.00041707], [2.78722762, 1], [2.72755193, 1.32128906]]))
def test_convex_area(points):
    """Check the areas of various convex sets."""
    hull = ConvexHull(points)
    poly = polygon_from_hull(points[hull.vertices])
    assert np.isclose(hull.volume, poly.area)


@given(random_quat=arrays(np.float64, (4,), elements=floats(-1, 1, width=64)))
def test_rotation_signed_area(random_quat):
    """Ensure that rotating does not change the signed area."""
    assume(not np.all(random_quat == 0))
    random_quat = rowan.normalize(random_quat)
    rotated_points = rowan.rotate(random_quat, get_square_points())
    poly = Polygon(rotated_points)
    assert np.isclose(poly.signed_area, 1)

    poly.reorder_verts(clockwise=True)
    assert np.isclose(poly.signed_area, -1)


@settings(deadline=500)
@given(EllipseSurfaceStrategy)
def test_set_convex_area(points):
Code example #26
    -------
    strategy : np.ndarray
        Strategy to produce an array that broadcasts with the given core
        dimension signature, with a constant value of that dtype.

    See Also
    --------
    `hypothesis.extra.numpy.arrays`
    """
    dtype, shapes, elements = draw(_arrays_args(signature, code_st, kwds))
    fill = draw(elements)
    return np.full(shapes[0], fill, dtype)


matrices_b = hyn.arrays(dtype=np.float64,
                        shape=hyn.array_shapes(min_dims=2),
                        elements=real_numbers())
matrices_c = hyn.arrays(dtype=np.float64,
                        shape=hyn.array_shapes(min_dims=2, max_dims=2),
                        elements=real_numbers())
vectors = hyn.arrays(dtype=np.float64,
                     shape=hyn.array_shapes(min_dims=1, max_dims=1),
                     elements=real_numbers())

# =============================================================================
# Helpers for TestCaseNumpy with Hypothesis
# =============================================================================


def core_only(*arrays: np.ndarray, dims: int = 2) -> np.ndarray:
    """Strip all non-core dimensions from arrays
Code example #27
dtype_strat_numpy = st.sampled_from(
    (np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64)
)

shape_functions = [
    (np.random.sample, sample),
    (np.random.random_sample, random_sample),
    (np.random.ranf, ranf),
    (np.random.random, random),
]


@given(
    a=hnp.arrays(
        shape=hnp.array_shapes(max_side=4, max_dims=5), dtype=dtype_strat_numpy
    )
)
@pytest.mark.parametrize("np_function,mg_function", shape_functions)
def test_random_shape_funcs(np_function, mg_function, a):
    shape = a.shape
    np.random.seed(0)
    arr = np_function(shape)
    np.random.seed(0)
    tens = mg_function(shape)
    assert_allclose(arr, tens.data)


unpacked_shape_functions = [(np.random.rand, rand), (np.random.randn, randn)]

Code example #28
class TestColorExtraction:
    def test_colorspace_conversion_rgb_hsv_single_color(self):
        """
        Assert that converting one color from HSV to RGB and back
        (or vice versa) results in the same color
        """
        rgb_color = np.array([10, 20, 30], np.uint8)
        hsv_color = np.array([105, 170, 30], np.uint8)

        assert_allclose(rgb_to_hsv(rgb_color), hsv_color)

        assert_allclose(hsv_to_rgb(hsv_color), rgb_color)

    def test_colorspace_conversion_rgb_hsv_input_shapes(self):
        """
        Test that colorspace conversion works with input arrays of
        different shapes
        """
        rgb_color = np.array([10, 20, 30], np.uint8)
        rgb_2D = np.tile(rgb_color, 5).reshape(5, 3)
        rgb_3D = np.tile(rgb_color, 5 * 5).reshape(5, 5, 3)

        def there_and_back_again(color_arr):
            return hsv_to_rgb(rgb_to_hsv(color_arr))

        assert_allclose(there_and_back_again(rgb_2D), rgb_2D)
        assert_allclose(there_and_back_again(rgb_3D), rgb_3D)

    @given(arrays(shape=(1), dtype=np.uint8))
    def test_colorspace_conversion_rgb_gray_single(self, gray_color):
        """
        Test that conversion from gray to rgb and back works when given
        a single color as input
        """
        rgb = gray_to_rgb(gray_color)
        assert rgb.shape == (3, )
        assert np.all(rgb == gray_color)
        gray = rgb_to_gray(rgb)
        assert_allclose(gray, gray_color)

    @given(arrays(shape=(20), dtype=np.uint8))
    def test_colorspace_conversion_rgb_gray_list(self, gray_list):
        """
        Test that conversion from gray to rgb and back works when given
        a list of color as input
        """
        rgb = gray_to_rgb(gray_list)
        assert rgb.shape == (20, 3)
        assert rgb.sum() == gray_list.sum() * 3
        gray = rgb_to_gray(rgb)
        assert_allclose(gray, gray_list)

    @given(arrays(shape=(20, 20), dtype=np.uint8))
    def test_colorspace_conversion_rgb_gray_image(self, gray_image):
        """
        Test that conversion from gray to rgb and back works when given
        an image as input
        """
        rgb = gray_to_rgb(gray_image)
        assert rgb.shape == (20, 20, 3)
        assert rgb.sum() == gray_image.sum() * 3
        gray = rgb_to_gray(rgb)
        assert_allclose(gray, gray_image)

    @given(arrays(shape=(20, 20), dtype=np.bool))
    def test_colorspace_conversion_mask_to_rgb(self, mask):
        """
        Test that converting a mask to rgb replaces the False values
        with the given background color and the True values with
        the given foreground color
        """
        fg_color = (255, 0, 0)  # red
        rgb = mask_to_rgb(mask, bg_color=[0, 0, 0], fg_color=fg_color)
        assert mask.sum() == rgb.sum() / sum(fg_color)

    def test_colorspace_conversion_mask_to_rgb_fg_colors(self):
        img_name, img = get_test_image()
        mask = np.empty(img.shape[:2], dtype=np.bool)
        mask[:] = False
        mask[50:100, 100:150] = True
        out_img = mask_to_rgb_with_fg_colors_from_image(
            mask, (255, 255, 255), img)
        assert out_img.shape == img.shape
        assert np.all(out_img[~mask] == 255)
Code example #29
File: test_binary_funcs.py  Project: flappyBug/MyGrad
                       as_decimal=False,
                       atol=1e-4,
                       rtol=1e-4,
                       vary_each_element=True)
def test_logaddexp2_bkwd():
    pass


@fwdprop_test_factory(mygrad_func=maximum, true_func=np.maximum, num_arrays=2)
def test_maximum_fwd():
    pass


@settings(deadline=2000)
@given(x=hnp.arrays(shape=hnp.array_shapes(max_side=4, max_dims=5),
                    dtype=float,
                    elements=st.floats(-10., 10.)),
       data=st.data())
def test_maximum_bkwd(x, data):
    y = data.draw(hnp.arrays(shape=broadcastable_shape(x.shape, max_dim=5),
                             dtype=float,
                             elements=st.floats(-10., 10.)),
                  label="y")

    assume(not np.any(np.isclose(x, y)))

    x_arr = Tensor(np.copy(x))
    y_arr = Tensor(np.copy(y))
    o = maximum(x_arr, y_arr)

    grad = data.draw(hnp.arrays(shape=o.shape,
Code example #30
"""
import pytest

pytest.importorskip("hypothesis")
# isort: split

import hypothesis.extra.numpy as npst
import hypothesis.strategies as st
from hypothesis import given

import xarray as xr

an_array = npst.arrays(
    dtype=st.one_of(npst.unsigned_integer_dtypes(), npst.integer_dtypes(),
                    npst.floating_dtypes()),
    shape=npst.array_shapes(max_side=3),  # max_side specified for performance
)


@pytest.mark.slow
@given(st.data(), an_array)
def test_CFMask_coder_roundtrip(data, arr):
    names = data.draw(
        st.lists(st.text(), min_size=arr.ndim, max_size=arr.ndim,
                 unique=True).map(tuple))
    original = xr.Variable(names, arr)
    coder = xr.coding.variables.CFMaskCoder()
    roundtripped = coder.decode(coder.encode(original))
    xr.testing.assert_identical(original, roundtripped)
Code example #31
File: test_data.py  Project: diedrikvanden/SCope
import numpy as np
from hypothesis import given
from hypothesis.strategies import floats, integers, lists
from hypothesis.extra.numpy import arrays

from scopeserver.dataserver.utils import data


@given(arrays(np.float, shape=(1, 100), elements=floats(-1000, 1000)))
def test_vmax(values):
    expected_100 = np.amax(values)
    expected_99 = np.clip(np.percentile(values, 99), 0.01, expected_100)
    assert data.get_99_and_100_percentiles(values) == (expected_99,
                                                       expected_100)


@given(lists(integers()))
def test_uniq(values):
    unique_values = data.uniq(values)
    assert set(values) == set(unique_values)
    assert len(set(values)) == len(unique_values)
Code example #32
def square_array(draw):
    """Generate a square numpy array."""
    n = draw(st.integers(min_value=1, max_value=50))

    return draw(arrays('float', (n, n), elements=st.floats(allow_nan=False)))
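A minimal usage sketch, assuming square_array is wrapped with @st.composite:

from hypothesis import given

@given(square_array())
def test_square_array_is_square(a):
    assert a.ndim == 2
    assert a.shape[0] == a.shape[1]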
Code example #33
class Test_generate_targets:
    """ Ensure that the generate_targets function produces the correct target values. """
    @given(
        boxes=hnp.arrays(
            dtype=float,
            shape=st.tuples(st.integers(0, 3), st.just(4)),
            elements=st.floats(1, 100),
            unique=True,
        ),
        truth=hnp.arrays(
            dtype=float,
            shape=st.tuples(st.integers(0, 3), st.just(4)),
            elements=st.floats(1, 100),
            unique=True,
        ),
        data=st.data(),
    )
    def test_shapes(self, boxes: ndarray, truth: ndarray,
                    data: st.SearchStrategy):
        """ Ensure the shape returned by generate_targets is correct, even in edge cases producing empty arrays. """
        boxes = boxes.cumsum(
            axis=1)  # to ensure we don't hit 0-width or -height boxes
        truth = truth.cumsum(
            axis=1)  # to ensure we don't hit 0-width or -height boxes
        N = boxes.shape[0]
        K = truth.shape[0]
        labels = data.draw(hnp.arrays(dtype=int, shape=(K, )))
        cls, reg = generate_targets(boxes, truth, labels, 0.5, 0.4)

        msg = "generate_targets failed to produce classification targets of the correct shape"
        assert cls.shape == (N, ), msg

        msg = "generate_targets failed to produce regression targets of the correct shape"
        assert reg.shape == (N, 4), msg

    @given(x=hnp.arrays(dtype=float, shape=(5, 4), elements=st.floats(1, 100)))
    def test_identical_proposed_and_truth(self, x: ndarray):
        """ Ensure that generate_targets produces regression targets that are zero for identical proposal and truth. """
        x = x.cumsum(axis=1)  # ensure (l, t, r , b)
        labels = np.array([0] * 5)
        _, reg = generate_targets(x, x, labels, 0.5, 0.4)
        msg = "generate_targets failed to produce the expected output when the proposed boxes are identical to ground truth"
        assert_allclose(actual=reg,
                        desired=np.zeros_like(x),
                        atol=1e-5,
                        rtol=1e-5,
                        err_msg=msg)

    @given(shuffle_inds=st.permutations(np.arange(3)))
    def test_known_regression_values(self, shuffle_inds: List[int]):
        """ Ensure that generate_targets works for known values. Ensure that datum ordering does not matter. """
        prop = np.array([
            [-0.5, -0.5, 0.5, 0.5],  # neither axis matches truth
            [0, -0.5, np.exp(1), 0.5],  # x matches truth
            [-0.5, 0, 0.5, np.exp(1)],  # y matches truth
        ])
        truth = np.array([[0, 0, np.exp(1), np.exp(1)]])
        labels = np.array([1.0])

        out_reg = np.array([
            [np.exp(1) / 2, np.exp(1) / 2, 1, 1],
            [0, np.exp(1) / 2, 0, 1],
            [np.exp(1) / 2, 0, 1, 0],
        ])

        _, reg = generate_targets(prop[shuffle_inds], truth, labels, 0.5, 0.4)
        msg = "generate_targets failed to produce known-correct regression values"
        assert_allclose(actual=reg,
                        desired=out_reg[shuffle_inds],
                        atol=1e-5,
                        rtol=1e-5,
                        err_msg=msg)

    @given(
        label0=st.integers(1, 10),
        label1=st.integers(1, 10),
        shuffle_inds=st.permutations(np.arange(4)),
    )
    def test_label_invariance(self, label0: int, label1: int,
                              shuffle_inds: List[int]):
        """ Ensure that datum ordering doesn't matter for generate_targets. """
        # xyxy format
        prop = np.array([
            [-0.5, -0.5, 0.5,
             0.5],  # iou = 1 (truth 0) should be marked positive
            [0.0, -0.5, 0.49,
             0.5],  # iou = 0.5  (truth 0) should be marked ignore
            [0.0, -0.5, 0.39,
             0.5],  # iou = 0.39  (truth 0) should be marked negative
            [10.0, 10.0, 11, 11],
        ])  # iou = 1 (truth 1) should be marked positive

        # xyxy format
        truth = np.array([[-0.5, -0.5, 0.5, 0.5], [10.0, 10.0, 11, 11]])

        labels = np.array([label0, label1])

        out_labels = np.array(
            [label0, -1, 0,
             label1])  # truth 0 / ignore / background / truth 1 from above

        labels, reg = generate_targets(prop[shuffle_inds], truth, labels, 0.5,
                                       0.4)
        msg = "generate_targets is not invariant to datum-ordering"
        assert_allclose(actual=labels,
                        desired=out_labels[shuffle_inds],
                        err_msg=msg)
Code example #34
File: strategies.py  Project: emkornfield/arrow
def arrays(draw, type, size=None):
    if isinstance(type, st.SearchStrategy):
        type = draw(type)
    elif not isinstance(type, pa.DataType):
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    shape = (size,)

    if pa.types.is_list(type):
        offsets = draw(npst.arrays(np.uint8(), shape=shape)).cumsum() // 20
        offsets = np.insert(offsets, 0, 0, axis=0)  # prepend with zero
        values = draw(arrays(type.value_type, size=int(offsets.sum())))
        return pa.ListArray.from_arrays(offsets, values)

    if pa.types.is_struct(type):
        h.assume(len(type) > 0)
        names, child_arrays = [], []
        for field in type:
            names.append(field.name)
            child_arrays.append(draw(arrays(field.type, size=size)))
        # fields' metadata are lost here, because from_arrays doesn't accept
        # a fields argument, only names
        return pa.StructArray.from_arrays(child_arrays, names=names)

    if (pa.types.is_boolean(type) or pa.types.is_integer(type) or
            pa.types.is_floating(type)):
        values = npst.arrays(type.to_pandas_dtype(), shape=(size,))
        np_arr = draw(values)
        if pa.types.is_floating(type):
            # Workaround ARROW-4952: no easy way to assert array equality
            # in a NaN-tolerant way.
            np_arr[np.isnan(np_arr)] = -42.0
        return pa.array(np_arr, type=type)

    if pa.types.is_null(type):
        value = st.none()
    elif pa.types.is_time(type):
        value = st.times()
    elif pa.types.is_date(type):
        value = st.dates()
    elif pa.types.is_timestamp(type):
        tz = pytz.timezone(type.tz) if type.tz is not None else None
        value = st.datetimes(timezones=st.just(tz))
    elif pa.types.is_binary(type):
        value = st.binary()
    elif pa.types.is_string(type):
        value = st.text()
    elif pa.types.is_decimal(type):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    else:
        raise NotImplementedError(type)

    values = st.lists(value, min_size=size, max_size=size)
    return pa.array(draw(values), type=type)
Code example #35
File: test_fill_values.py  Project: vlulla/hypothesis
def test_minimizes_to_fill():
    result = minimal(arrays(float, 10, fill=st.just(3.0)))
    assert (result == 3.0).all()
Code example #36
File: test_encode_decode.py  Project: benbovy/xarray
import hypothesis.extra.numpy as npst
import hypothesis.strategies as st
from hypothesis import given, settings

import xarray as xr

# Run for a while - arrays are a bigger search space than usual
settings.register_profile("ci", deadline=None)
settings.load_profile("ci")


an_array = npst.arrays(
    dtype=st.one_of(
        npst.unsigned_integer_dtypes(),
        npst.integer_dtypes(),
        npst.floating_dtypes(),
    ),
    shape=npst.array_shapes(max_side=3),  # max_side specified for performance
)


@given(st.data(), an_array)
def test_CFMask_coder_roundtrip(data, arr):
    names = data.draw(st.lists(st.text(), min_size=arr.ndim,
                               max_size=arr.ndim, unique=True).map(tuple))
    original = xr.Variable(names, arr)
    coder = xr.coding.variables.CFMaskCoder()
    roundtripped = coder.decode(coder.encode(original))
    xr.testing.assert_identical(original, roundtripped)

Code example #37
def test_may_reuse_distinct_integers_if_asked():
    find_any(
        arrays('uint64', 10, distinct_integers(), fill=distinct_integers()),
        lambda x: len(set(x)) < len(x))
Code example #38
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER

from __future__ import division, print_function, absolute_import

import hypothesis.strategies as st
from hypothesis import given
from tests.common.debug import minimal, find_any
from hypothesis.extra.numpy import arrays


@given(arrays(object, 100, st.lists(max_size=0)))
def test_generated_lists_are_distinct(ls):
    assert len(set(map(id, ls))) == len(ls)


@st.composite
def distinct_integers(draw):
    used = draw(st.shared(st.builds(set), key='distinct_integers.used'))
    i = draw(st.integers(0, 2 ** 64 - 1).filter(lambda x: x not in used))
    used.add(i)
    return i


@given(arrays('uint64', 10, distinct_integers()))
def test_does_not_reuse_distinct_integers(arr):
    assert len(set(arr)) == len(arr)
Code example #39
File: types.py  Project: ankitdobhal/hypothesis
    _global_type_lookup[re.Match] = (st.text().map(
        lambda c: re.match(".", c, flags=re.DOTALL)).filter(bool))
    _global_type_lookup[re.Pattern] = st.builds(re.compile,
                                                st.sampled_from(["", b""]))
if sys.version_info[:2] >= (3, 9):  # pragma: no cover
    # subclass of MutableMapping, and in Python 3.9 we resolve to a union
    # which includes this... but we don't actually ever want to build one.
    _global_type_lookup[os._Environ] = st.just(os.environ)

try:  # pragma: no cover
    import numpy as np

    from hypothesis.extra.numpy import array_dtypes, array_shapes, arrays, scalar_dtypes

    _global_type_lookup[np.dtype] = array_dtypes()
    _global_type_lookup[np.ndarray] = arrays(scalar_dtypes(),
                                             array_shapes(max_dims=2))
except ImportError:
    pass

_global_type_lookup.update({
    # Note: while ByteString notionally also represents the bytearray and
    # memoryview types, it is a subclass of Hashable and those types are not.
    # We therefore only generate the bytes type.
    typing.ByteString:
    st.binary(),
    collections.abc.ByteString:
    st.binary(),
    # TODO: SupportsAbs and SupportsRound should be covariant, ie have functions.
    typing.SupportsAbs:
    st.one_of(
        st.booleans(),
Code example #40
def test_minimizes_to_fill():
    result = minimal(arrays(float, 10, fill=st.just(3.0)))
    assert (result == 3.0).all()
Code example #41
File: test_gen_data.py  Project: degustaf/hypothesis
#
# END HEADER

from __future__ import division, print_function, absolute_import

import numpy as np
import pytest

import hypothesis.strategies as st
from flaky import flaky
from hypothesis import find, given, settings
from hypothesis.extra.numpy import arrays, from_dtype
from hypothesis.strategytests import strategy_test_suite
from hypothesis.internal.compat import text_type, binary_type

TestFloats = strategy_test_suite(arrays(float, ()))
TestIntMatrix = strategy_test_suite(arrays(int, (3, 2)))
TestBoolTensor = strategy_test_suite(arrays(bool, (2, 2, 2)))


STANDARD_TYPES = list(map(np.dtype, [
    u'int8', u'int32', u'int64',
    u'float', u'float32', u'float64',
    complex,
    bool, text_type, binary_type
]))


@pytest.mark.parametrize(u't', STANDARD_TYPES)
def test_produces_instances(t):
    @given(from_dtype(t))
Code example #42
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
from hypothesis import given
from numpy.testing import assert_array_equal
from scipy import special

from mygrad.math._special import logsumexp
from tests.custom_strategies import valid_axes


@given(
    data=st.data(),
    x=hnp.arrays(shape=hnp.array_shapes(),
                 dtype=np.float,
                 elements=st.floats(-1e6, 1e6)),
    keepdims=st.booleans(),
)
def test_logsumexp(data: st.SearchStrategy, x: np.ndarray, keepdims: bool):
    axes = data.draw(valid_axes(ndim=x.ndim), label="axes")
    mygrad_result = logsumexp(x, axis=axes, keepdims=keepdims)
    scipy_result = special.logsumexp(x, axis=axes, keepdims=keepdims)
    assert_array_equal(
        mygrad_result,
        scipy_result,
        err_msg="mygrad's implementation of logsumexp does "
        "not match that of scipy's",
    )
Code example #43
File: test_gen_data.py  Project: degustaf/hypothesis
def test_can_minimize_large_arrays_easily():
    x = find(arrays(u'uint32', 1000), lambda t: t.any())
    assert x.sum() == 1
Code example #44
File: test_gen_data.py  Project: degustaf/hypothesis
def test_can_create_arrays_of_composite_types():
    arr = find(arrays(object, 100, foos), lambda x: True)
    for x in arr:
        assert isinstance(x, Foo)
Code example #45
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.

# END HEADER

from __future__ import division, print_function, absolute_import

import numpy as np
import pytest
import hypothesis.strategies as st
from hypothesis import find, given
from hypothesis.extra.numpy import arrays, from_dtype
from hypothesis.strategytests import strategy_test_suite
from hypothesis.internal.compat import text_type, binary_type

TestFloats = strategy_test_suite(arrays(float, ()))
TestIntMatrix = strategy_test_suite(arrays(int, (3, 2)))
TestBoolTensor = strategy_test_suite(arrays(bool, (2, 2, 2)))

STANDARD_TYPES = list(
    map(np.dtype, [
        u'int8', u'int32', u'int64', u'float', u'float32', u'float64', complex,
        bool, text_type, binary_type
    ]))


@pytest.mark.parametrize(u't', STANDARD_TYPES)
def test_produces_instances(t):
    @given(from_dtype(t))
    def test_is_t(x):
        assert isinstance(x, t.type)
Code example #46
def hypothesis_float_matrix_strategy(number_of_rows: int,
                                     number_of_cols: int) -> SearchStrategy:
    return hnp.arrays(dtype=float, shape=(number_of_rows, number_of_cols))
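A hypothetical usage of the factory above:

from hypothesis import given

@given(hypothesis_float_matrix_strategy(3, 4))
def test_matrix_has_requested_shape(m):
    assert m.shape == (3, 4)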
Code example #47
File: test_gen_data.py  Project: degustaf/hypothesis
def test_generates_and_minimizes():
    x = find(arrays(float, (2, 2)), lambda t: True)
    assert (x == np.zeros(shape=(2, 2), dtype=float)).all()
Code example #48
def test_generates_and_minimizes():
    x = find(arrays(float, (2, 2)), lambda t: True)
    assert (x == np.zeros(shape=(2, 2), dtype=float)).all()
Code example #49
File: test_gen_data.py  Project: degustaf/hypothesis
def test_can_minimize_float_arrays():
    x = find(
        arrays(float, 50), lambda t: t.sum() >= 1.0,
        settings=settings(database=None))
    assert 1.0 <= x.sum() <= 1.1
Code example #50
def test_can_minimize_large_arrays_easily():
    x = find(arrays(u'uint32', 1000), lambda t: t.any())
    assert x.sum() == 1
Code example #51
File: test_gen_data.py  Project: degustaf/hypothesis
def test_can_create_arrays_of_tuples():
    arr = find(
        arrays(object, 10, st.tuples(st.integers(), st.integers())),
        lambda x: all(t[0] < t[1] for t in x))
    for a in arr:
        assert a in ((0, 1), (-1, 0))
Code example #52
def test_can_minimize_float_arrays():
    x = find(arrays(float, 100), lambda t: t.sum() >= 1.0)
    assert 1.0 <= x.sum() <= 1.01
Code example #53
File: types.py  Project: sunito/hypothesis
        int: st.integers().filter(lambda x: isinstance(x, int)),
        long: st.integers().map(long)  # noqa
    })

try:
    from hypothesis.extra.pytz import timezones
    _global_type_lookup[datetime.tzinfo] = timezones()
except ImportError:  # pragma: no cover
    pass
try:  # pragma: no cover
    import numpy as np
    from hypothesis.extra.numpy import \
        arrays, array_shapes, scalar_dtypes, nested_dtypes
    _global_type_lookup.update({
        np.dtype: nested_dtypes(),
        np.ndarray: arrays(scalar_dtypes(), array_shapes(max_dims=2)),
    })
except ImportError:  # pragma: no cover
    pass

try:
    import typing
except ImportError:  # pragma: no cover
    pass
else:
    _global_type_lookup.update({
        typing.ByteString: st.binary(),
        typing.io.BinaryIO: st.builds(io.BytesIO, st.binary()),  # type: ignore
        typing.io.TextIO: st.builds(io.StringIO, st.text()),  # type: ignore
        typing.Reversible: st.lists(st.integers()),
        typing.SupportsAbs: st.complex_numbers(),
Code example #54
def test_can_create_arrays_of_composite_types():
    arr = find(arrays(object, 100, foos), lambda x: True)
    for x in arr:
        assert isinstance(x, Foo)
Code example #55
File: strategies.py  Project: JonasSC/SuMPF
# primitive data #
##################

frequencies = st.floats(min_value=0.0, max_value=1e15)
non_zero_frequencies = st.floats(min_value=1e-15, max_value=1e15)
sampling_rates = st.floats(min_value=1e-8, max_value=1e8)
resolutions = sampling_rates
short_lengths = st.integers(min_value=0, max_value=2 ** 10)
texts = st.text(alphabet=st.characters(blacklist_categories=("Cs",), blacklist_characters=("\x00",)))

##########
# Signal #
##########

_signal_parameters = {"channels": stn.arrays(dtype=numpy.float64,       # pylint: disable=no-value-for-parameter; there is a false alarm about a missing parameter for ``draw``
                                             shape=stn.array_shapes(min_dims=2, max_dims=2),
                                             elements=st.floats(min_value=-1e100, max_value=1e100)),
                      "sampling_rate": sampling_rates,
                      "offset": st.integers(min_value=-2 ** 24, max_value=2 ** 24),
                      "labels": st.lists(elements=texts)}
signal_parameters = st.fixed_dictionaries(_signal_parameters)
signals = st.builds(sumpf.Signal, **_signal_parameters)
_normalized_signal_parameters = {"channels": stn.arrays(dtype=numpy.float64,       # pylint: disable=no-value-for-parameter; there is a false alarm about a missing parameter for ``draw``
                                                        shape=stn.array_shapes(min_dims=2, max_dims=2),
                                                        elements=st.floats(min_value=-255.0 / 256.0, max_value=254.0 / 256.0)),     # pylint: disable=line-too-long
                                 "sampling_rate": sampling_rates,
                                 "offset": st.integers(min_value=-2 ** 24, max_value=2 ** 24),
                                 "labels": st.lists(elements=texts)}
normalized_signal_parameters = st.fixed_dictionaries(_normalized_signal_parameters)
normalized_signals = st.builds(sumpf.Signal, **_normalized_signal_parameters)
Code example #56
def test_can_create_arrays_of_tuples():
    arr = find(arrays(object, 10, st.tuples(st.integers(), st.integers())),
               lambda x: all(t[0] < t[1] for t in x))
    for a in arr:
        assert a in ((0, 1), (-1, 0))
Code example #57
def test_may_reuse_distinct_integers_if_asked():
    find_any(
        arrays('uint64', 10, distinct_integers(), fill=distinct_integers()),
        lambda x: len(set(x)) < len(x)
    )
Code example #58
File: test_utils.py  Project: kuan-li/MyGrad
    return x**2


def binary_func(x, y):
    return x * y**2


def ternary_func(x, y, z):
    return z * x * y**2


@given(
    data=st.data(),
    x=hnp.arrays(
        shape=hnp.array_shapes(max_side=3, max_dims=3),
        dtype=float,
        elements=st.floats(-10, 10),
    ),
)
def test_finite_difference_no_broadcast(data, x):
    atol, rtol = (1e-2, 1e-2)
    y = data.draw(hnp.arrays(shape=x.shape,
                             dtype=float,
                             elements=st.floats(-100, 100)),
                  label="y")

    z = data.draw(hnp.arrays(shape=x.shape,
                             dtype=float,
                             elements=st.floats(-100, 100)),
                  label="z")
Code example #59
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from hypothesis.extra.numpy import arrays
from rasterio.warp import reproject
from rio_pansharpen.methods import(
    calculateRatio, Brovey)
from rio_pansharpen.utils import(
    _adjust_block_size, _check_crs, _simple_mask,
    _pad_window, _create_apply_mask, _rescale,
    _make_windows, _make_affine, _half_window)


# Testing _calculateRatio function from methods
@given(arrays(np.uint16, (3, 8, 8),
              elements=st.integers(
                min_value=1,
                max_value=np.iinfo('uint16').max)
              ),
       arrays(np.uint16, (3, 8, 8),
              elements=st.integers(
                min_value=1,
                max_value=np.iinfo('uint16').max)
              ),
       st.floats(min_value=0.2, max_value=1.0))
def test_calculateRatio(rgb, pan, weight):
    output = pan / ((rgb[0] + rgb[1] + rgb[2] * weight) / (2 + weight))
    assert np.array_equal(output, calculateRatio(rgb, pan, weight))


# Testing Brovey function from methods
@given(arrays(np.uint16, (3, 8, 8),
Code example #60
"""
import types

import numpy as np
from hypothesis import given
from hypothesis.extra.numpy import arrays
from hypothesis.strategies import integers

from nashpy.learning.fictitious_play import (
    fictitious_play,
    get_best_response_to_play_count,
    update_play_count,
)


@given(M=arrays(np.int8, (4, 5)))
def test_property_find_best_response_to_play_count(M):
    play_count = np.zeros(M.shape[1])
    best_response = get_best_response_to_play_count(M, play_count)
    assert best_response >= 0
    assert best_response <= M.shape[1] - 1


def test_find_best_response_to_play_count():
    M = np.array([[3, 2, 3], [4, 1, 1], [2, 3, 1]])
    play_counts = (
        np.array([1, 0, 0]),
        np.array([2, 1, 0]),
        np.array([0, 0, 2]),
    )
    best_responses = (1, 1, 0)