Example #1
def ibm_compatible_floats(draw, min_value=None, max_value=None):
    if min_value is None:
        min_value = MIN_IBM_FLOAT
        
    if max_value is None:
        max_value = MAX_IBM_FLOAT
    
    truncated_min_f = max(min_value, MIN_IBM_FLOAT)
    truncated_max_f = min(max_value, MAX_IBM_FLOAT)

    strategies = []
    if truncated_min_f <= LARGEST_NEGATIVE_NORMAL_IBM_FLOAT <= truncated_max_f:
        strategies.append(floats(truncated_min_f, LARGEST_NEGATIVE_NORMAL_IBM_FLOAT))

    if truncated_min_f <= SMALLEST_POSITIVE_NORMAL_IBM_FLOAT <= truncated_max_f:
        strategies.append(floats(SMALLEST_POSITIVE_NORMAL_IBM_FLOAT, truncated_max_f))

    if truncated_min_f <= 0 <= truncated_max_f:
        strategies.append(just(0.0))

    if len(strategies) == 0:
        strategies.append(floats(truncated_min_f, truncated_max_f))

    ibm = draw(one_of(*strategies))
    return ibm
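
A minimal usage sketch of the strategy above, assuming it is wrapped with hypothesis's composite(); the range constants are illustrative stand-ins for the enclosing module's real IBM hexadecimal-float values:

# Sketch only: the constants below are assumed approximations of the
# enclosing module's MIN/MAX IBM float values.
from hypothesis import given
from hypothesis.strategies import composite

MAX_IBM_FLOAT = 7.2e75                          # assumed
MIN_IBM_FLOAT = -MAX_IBM_FLOAT                  # assumed
SMALLEST_POSITIVE_NORMAL_IBM_FLOAT = 5.4e-79    # assumed
LARGEST_NEGATIVE_NORMAL_IBM_FLOAT = -5.4e-79    # assumed

ibm_floats = composite(ibm_compatible_floats)

@given(ibm_floats())
def test_stays_in_ibm_range(f):
    assert MIN_IBM_FLOAT <= f <= MAX_IBM_FLOAT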
Example #2
def test_2d_compare_with_numpy(size, nx, xmin, xmax, ny, ymin, ymax):

    if xmax <= xmin or ymax <= ymin:
        return

    x = arrays(np.float64, size, elements=st.floats(-1000, 1000)).example()
    y = arrays(np.float64, size, elements=st.floats(-1000, 1000)).example()

    try:
        reference = np.histogram2d(x, y, bins=(nx, ny),
                                   range=((xmin, xmax), (ymin, ymax)))[0]
    except Exception:
        # If Numpy fails, we skip the comparison since this isn't our fault
        return

    # First, check the Numpy result because it sometimes doesn't make sense. See
    # bug report https://github.com/numpy/numpy/issues/9435
    n_inside = np.sum((x <= xmax) & (x >= xmin) & (y <= ymax) & (y >= ymin))
    if n_inside != np.sum(reference):
        return

    fast = histogram2d(x, y, bins=(nx, ny),
                       range=((xmin, xmax), (ymin, ymax)))

    print(x, y, nx, xmin, xmax, ny, ymin, ymax)

    np.testing.assert_equal(fast, reference)
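
Calling .example() inside a test, as above, is discouraged by Hypothesis; a sketch of drawing the same arrays through st.data() instead (the test name and fixed bin layout are assumptions):

# Sketch: draw the arrays inside the test so failures shrink properly.
import numpy as np
import hypothesis.strategies as st
from hypothesis import given
from hypothesis.extra.numpy import arrays

@given(data=st.data(), size=st.integers(0, 100))
def test_2d_arrays_drawn_not_sampled(data, size):
    x = data.draw(arrays(np.float64, size, elements=st.floats(-1000, 1000)))
    y = data.draw(arrays(np.float64, size, elements=st.floats(-1000, 1000)))
    reference = np.histogram2d(x, y, bins=(4, 4),
                               range=((-1000, 1000), (-1000, 1000)))[0]
    assert reference.sum() == len(x)  # every point falls inside the range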
Example #3
    def _vector_simple_float(self, func, type, data):
        func = always_inline(func)

        size = rffi.sizeof(rffi.DOUBLE)
        myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
        def f(bytecount, va, vb, vc):
            i = 0
            while i < bytecount:
                myjitdriver.jit_merge_point()
                a = raw_storage_getitem(type,va,i)
                b = raw_storage_getitem(type,vb,i)
                c = func(a,b)
                raw_storage_setitem(vc, i, rffi.cast(type,c))
                i += size

        la = data.draw(st.lists(st.floats(), min_size=10, max_size=150))
        l = len(la)
        lb = data.draw(st.lists(st.floats(), min_size=l, max_size=l))

        rawstorage = RawStorage()
        va = rawstorage.new(la, type)
        vb = rawstorage.new(lb, type)
        vc = rawstorage.new(None, type, size=l)
        self.meta_interp(f, [l*size, va, vb, vc], vec=True)

        for i in range(l):
            c = raw_storage_getitem(type,vc,i*size)
            r = rffi.cast(type, func(la[i], lb[i]))
            assert isclose(r, c)

        rawstorage.clear()
Example #4
def _inputs(draw):
    N = draw(st.integers(min_value=0, max_value=5))
    D = draw(st.integers(min_value=1, max_value=5))
    # N, D, data, lambda1, lambda2
    return (
        N,
        D,
        draw(st.lists(
            min_size=N * D,
            max_size=N * D,
            elements=st.one_of(
                st.floats(min_value=-10, max_value=1 - TOLERANCE),
                st.floats(min_value=1 + TOLERANCE, max_value=10))
        )),
        draw(st.lists(
            elements=st.one_of(
                st.floats(min_value=-2, max_value=-TOLERANCE),
                st.floats(min_value=TOLERANCE, max_value=2)),
            min_size=D,
            max_size=D,
        )),
        draw(st.lists(
            elements=st.floats(min_value=-2, max_value=2),
            min_size=D,
            max_size=D,
        )),
    )
Example #5
def test_power_measurement(instrument, data):
    channel_id = data.draw(sampled_from(instrument.channel_ids))
    channel = instrument.channel(channel_id)
    voltage = data.draw(floats(channel.voltage.protection.min, channel.voltage.protection.max).map(lambda v: round(v, 3)))
    current = data.draw(floats(channel.current.protection.min, channel.current.protection.max).map(lambda v: round(v, 3)))
    instrument._inst._channel_voltage_measurements[channel_id] = voltage
    instrument._inst._channel_current_measurements[channel_id] = current
    assert channel.power.measurement == round(voltage*current, 3)
Example #6
def test_out_of_range_floats_are_bad():
    with pytest.raises(BadData):
        floats(11, 12).from_basic(floats(0, 1).to_basic((0, 0.0)))

    with pytest.raises(BadData):
        FixedBoundedFloatStrategy(11, 12).from_basic(
            floats().to_basic(float(u'nan'))
        )
Example #7
def test_diff_values_float(data):
    x = data.draw(st.just(0), label='x')
    y = data.draw(st.floats(min_value=1e8), label='y')
    z = data.draw(st.floats(max_value=-1e8), label='z')

    assert not are_values_different(x, x)
    assert are_values_different(x, y)
    assert are_values_different(x, z)
    assert are_values_different(y, z)
Example #8
def dataset_utm_north_down(draw):
    """Generate a fake UTM dataset with an origin, a resolution, and a finite size"""
    x = draw(floats(min_value=-1e6, max_value=1e+6, allow_nan=False, allow_infinity=False))
    y = draw(floats(min_value=-1e6, max_value=1e+6, allow_nan=False, allow_infinity=False))
    res = draw(floats(min_value=0.1, max_value=30, allow_nan=False, allow_infinity=False))
    h = draw(integers(min_value=1, max_value=1000))
    w = draw(integers(min_value=1, max_value=1000))
    return FakeDataset(
        transform=windows.Affine.identity() * windows.Affine.translation(x, y) * windows.Affine.scale(res),
        height=h, width=w)
Example #9
def test_no_single_floats_in_range():
    low = 2.0 ** 25 + 1
    high = low + 2
    st.floats(low, high).validate()  # Note: OK for 64bit floats
    with pytest.raises(InvalidArgument):
        """Unrepresentable bounds are deprecated; but we're not testing that
        here."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            st.floats(low, high, width=32).validate()
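
The low bound above is chosen deliberately: float32 has a 24-bit significand, so at magnitude 2**25 representable values are 4 apart and nothing lies strictly between low and high. A quick numpy sketch of that fact:

# Sketch: no 32-bit float exists in the open interval (2**25 + 1, 2**25 + 3).
import numpy as np

assert float(np.float32(2.0 ** 25 + 1)) == 2.0 ** 25      # rounds down
assert float(np.float32(2.0 ** 25 + 3)) == 2.0 ** 25 + 4  # rounds up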
Example #10
def test_diff_values_array(data):
    a = data.draw(st.lists(elements=st.integers(min_value=0), min_size=1))
    b = data.draw(st.lists(elements=st.integers(max_value=-1), min_size=1))
    c = data.draw(st.lists(elements=st.floats(min_value=1e8), min_size=1))
    d = data.draw(st.lists(elements=st.floats(max_value=-1e8), min_size=1))
    # TODO: Figure out a way to include 0 in lists (arrays)

    assert are_values_different(a, b)
    assert are_values_different(c, d)
    assert not are_values_different(a, a)
Example #11
def from_dtype(dtype):
    # type: (np.dtype) -> st.SearchStrategy[Any]
    """Creates a strategy which can generate any value of the given dtype."""
    check_type(np.dtype, dtype, 'dtype')
    # Compound datatypes, eg 'f4,f4,f4'
    if dtype.names is not None:
        # mapping np.void.type over a strategy is nonsense, so return now.
        return st.tuples(
            *[from_dtype(dtype.fields[name][0]) for name in dtype.names])

    # Subarray datatypes, eg '(2, 3)i4'
    if dtype.subdtype is not None:
        subtype, shape = dtype.subdtype
        return arrays(subtype, shape)

    # Scalar datatypes
    if dtype.kind == u'b':
        result = st.booleans()  # type: SearchStrategy[Any]
    elif dtype.kind == u'f':
        if dtype.itemsize == 2:
            result = st.floats(width=16)
        elif dtype.itemsize == 4:
            result = st.floats(width=32)
        else:
            result = st.floats()
    elif dtype.kind == u'c':
        if dtype.itemsize == 8:
            float32 = st.floats(width=32)
            result = st.builds(complex, float32, float32)
        else:
            result = st.complex_numbers()
    elif dtype.kind in (u'S', u'a'):
        # Numpy strings are null-terminated; only allow round-trippable values.
        # `itemsize == 0` means 'fixed length determined at array creation'
        result = st.binary(max_size=dtype.itemsize or None
                           ).filter(lambda b: b[-1:] != b'\0')
    elif dtype.kind == u'u':
        result = st.integers(min_value=0,
                             max_value=2 ** (8 * dtype.itemsize) - 1)
    elif dtype.kind == u'i':
        overflow = 2 ** (8 * dtype.itemsize - 1)
        result = st.integers(min_value=-overflow, max_value=overflow - 1)
    elif dtype.kind == u'U':
        # Encoded in UTF-32 (four bytes/codepoint) and null-terminated
        result = st.text(max_size=(dtype.itemsize or 0) // 4 or None
                         ).filter(lambda b: b[-1:] != u'\0')
    elif dtype.kind in (u'm', u'M'):
        if '[' in dtype.str:
            res = st.just(dtype.str.split('[')[-1][:-1])
        else:
            res = st.sampled_from(TIME_RESOLUTIONS)
        result = st.builds(dtype.type, st.integers(-2**63, 2**63 - 1), res)
    else:
        raise InvalidArgument(u'No strategy inference for {}'.format(dtype))
    return result.map(dtype.type)
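
A usage sketch (test names are illustrative) checking that generated values carry the requested scalar type:

# Sketch: values produced by from_dtype have the requested numpy scalar type.
import numpy as np
from hypothesis import given

@given(from_dtype(np.dtype('uint8')))
def test_uint8_in_range(value):
    assert isinstance(value, np.uint8) and 0 <= int(value) <= 255

@given(from_dtype(np.dtype('M8[s]')))
def test_datetime64_seconds(value):
    assert isinstance(value, np.datetime64)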
Example #12
def prob_end_spatial_tournaments(draw, strategies=strategies,
                                 min_size=1, max_size=10,
                                 min_prob_end=0, max_prob_end=1,
                                 min_noise=0, max_noise=1,
                                 min_repetitions=1, max_repetitions=20):
    """
    A hypothesis decorator to return a probabilistic ending spatial tournament.

    Parameters
    ----------
    min_size : integer
        The minimum number of strategies to include
    max_size : integer
        The maximum number of strategies to include
    min_prob_end : float
        The minimum probability of a match ending
    max_prob_end : float
        The maximum probability of a match ending
    min_noise : float
        The minimum noise value
    max_noise : float
        The maximum noise value
    min_repetitions : integer
        The minimum number of repetitions
    max_repetitions : integer
        The maximum number of repetitions
    """
    strategies = draw(strategy_lists(strategies=strategies,
                                     min_size=min_size,
                                     max_size=max_size))
    players = [s() for s in strategies]
    player_indices = list(range(len(players)))

    all_potential_edges = list(itertools.combinations(player_indices, 2))
    all_potential_edges.extend([(i, i) for i in player_indices])  # Loops
    edges = draw(lists(sampled_from(all_potential_edges), unique=True,
                       average_size=2 * len(players)))

    # Ensure all players/nodes are connected:
    node_indices = sorted(set([node for edge in edges for node in edge]))
    missing_nodes = [index
                     for index in player_indices if index not in node_indices]
    for index in missing_nodes:
        opponent = draw(sampled_from(player_indices))
        edges.append((index, opponent))

    prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end))
    repetitions = draw(integers(min_value=min_repetitions,
                                max_value=max_repetitions))
    noise = draw(floats(min_value=min_noise, max_value=max_noise))

    tournament = ProbEndSpatialTournament(players, prob_end=prob_end,
                                          repetitions=repetitions,
                                          noise=noise, edges=edges)
    return tournament
Example #13
def valid_phase_type_generator(draw, k):
    ptg = []
    for i in range(k):
        ptg.append(draw(lists(floats(min_value=0, max_value=1),
                              min_size=k, max_size=k)))
    ptg_array = np.array(ptg)
    ptg_totals = np.array(draw(lists(floats(min_value=0, max_value=1),
                               min_size=k, max_size=k))).reshape(-1, 1)
    with np.errstate(invalid='ignore'):
        ptg_norm = (np.nan_to_num(ptg_array / ptg_array.sum(axis=1).reshape(-1, 1))) \
                   * ptg_totals
    return ptg_norm
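
The errstate/nan_to_num pairing exists for all-zero rows, where normalisation divides zero by zero; a standalone sketch of that case:

# Sketch: 0/0 rows become NaN under division; nan_to_num maps them to zeros.
import numpy as np

rows = np.array([[0.0, 0.0], [0.2, 0.6]])
with np.errstate(invalid='ignore'):
    normed = np.nan_to_num(rows / rows.sum(axis=1).reshape(-1, 1))
assert (normed[0] == 0).all()            # zero row stays zero, not NaN
assert np.isclose(normed[1].sum(), 1.0)  # nonzero rows normalise to 1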
Example #14
def sample_row(draw):
    """Get a sample database row."""
    return OrderedDict((
        ('id', draw(st.integers(min_value=1))),
        ('lat', draw(st.floats(min_value=-90, max_value=90))),
        ('lon', draw(st.floats(min_value=-180, max_value=180))),
        ('country', draw(st.sampled_from(['Cintra', 'Arnor', 'Arrakis', 'Gondor']))),
        ('town', draw(st.sampled_from(['Caer Morhen', 'Minas Tirith', 'Sietch Tabr', 'Gondolin']))),
        ('postcode', draw(st.sampled_from(['123123', '23423', '123122', '43223', '231232']))),
        ('street', draw(st.sampled_from(['street1', 'street2', 'street3', 'street4', 'street5']))),
        ('house', draw(st.integers(max_value=500))),
    ))
Example #15
def markov_chains(draw, alphabets=((2, 4), (2, 4), (2, 4))):
    """
    Generate Markov chains for use with hypothesis.

    Parameters
    ----------
    draw : function
        A sampling function passed in by hypothesis.
    alphabets : int, tuple of ints, tuple of pairs of ints
        If an int, it is the length of the chain and each variable is assumed to be binary.
        If a tuple of ints, the ints are assumed to be the size of each variable. If a tuple
        of pairs of ints, each pair represents the min and max alphabet size of each variable.

    Returns
    -------
    dist : Distribution
        A Markov chain with variable sizes.
    """
    try:
        len(alphabets)
        try:
            len(alphabets[0])
        except TypeError:
            alphabets = tuple((alpha, alpha) for alpha in alphabets)
    except TypeError:
        alphabets = ((2, 2),)*alphabets

    alphabets = [int(draw(integers(*alpha))) for alpha in alphabets]

    px = draw(arrays(np.float64, shape=alphabets[0], elements=floats(0, 1)))
    cds = [draw(arrays(np.float64, shape=(a, b), elements=floats(0, 1))) for a, b in pairwise(alphabets)]

    # assume things
    assume(px.sum() > 0)
    for cd in cds:
        for row in cd:
            assume(row.sum() > 0)

    px /= px.sum()

    # construct dist
    for cd in cds:
        cd /= cd.sum(axis=1, keepdims=True)
        slc = (np.newaxis,)*(len(px.shape)-1) + (colon, colon)
        px = px[..., np.newaxis] * cd[slc]

    dist = Distribution.from_ndarray(px)
    dist.normalize()
    return dist
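
Per the docstring, `alphabets` accepts three forms; a sketch of the corresponding calls, assuming markov_chains is wrapped with hypothesis's @composite so each call yields a strategy:

# Sketch of the three accepted forms of `alphabets` (assumes @composite):
three_binary_vars = markov_chains(alphabets=3)            # int: 3 binary variables
fixed_sizes = markov_chains(alphabets=(2, 3, 4))          # one size per variable
ranged_sizes = markov_chains(alphabets=((2, 4), (2, 4)))  # (min, max) per variable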
Example #16
    def float_range(self, left, right):
        for f in (math.isnan, math.isinf):
            for x in (left, right):
                assume(not f(x))
        left, right = sorted((left, right))
        assert left <= right
        return strategy(floats(left, right))
Example #17
def _gen_df(n_rows=1000):
    """Generate a random DataFrame.

    .. note::

        Generating a random value for every row takes too long,
        so we use the same value for every row.
        This may not be the safest way to do it though!
    """
    logger.debug("_gen_df(%s)", n_rows)

    _get_finite_value = functools.partial(_get_numeric_value, filt=np.isfinite)
    data = {
        'integer': np.repeat(_get_finite_value(
            st.integers(
                min_value=int(np.iinfo(np.int64).min * 0.99),  # st.integers requires int bounds
                max_value=int(np.iinfo(np.int64).max * 0.99))),
            n_rows),
        'float': np.repeat(_get_finite_value(
            st.floats(
                min_value=np.finfo(np.float64).min * 0.99,
                max_value=np.finfo(np.float64).max * 0.99)),
            n_rows),
        'bool': np.repeat(_get_finite_value(st.booleans()), n_rows),
        'text': np.repeat(
            st.text(alphabet=string.ascii_letters, min_size=0, max_size=100).example(),
            n_rows),
    }
    df = pd.DataFrame(data)
    # print(df.head())
    return df
Example #18
def host_json():
    return st.fixed_dictionaries(
        {
            "metadata":
                st.fixed_dictionaries({
                    "update_time": st.floats(),
                    "update_user": st.one_of(st.none(), st.text()),
                    "update_action": st.integers(),
                    "creator": st.text(),
                    "create_time": st.integers(),
                    "update_controller_action": st.text(),
                    "owner": st.one_of(st.none(), st.text()),
                    "command_id": st.one_of(st.none(), st.text(), st.integers()),}),
            "name": st.one_of(st.none(), st.text()),
            "ip": st.one_of(st.none(), st.text()),
            "_rev": st.one_of(st.none(), st.text()),
            "description": st.one_of(st.none(), st.text()),
            "default_gateway": st.one_of(st.none(), st.text()),
            "owned": st.booleans(),
            "services": st.one_of(st.none(), st.integers()),
            "hostnames": st.lists(st.text()),
            "vulns": st.one_of(st.none(), st.integers()),
            "owner": st.one_of(st.none(), st.text()),
            "credentials": st.one_of(st.none(), st.integers()),
            "_id": st.one_of(st.none(), st.integers()),
            "os": st.one_of(st.none(), st.text()),
            "id": st.one_of(st.none(), st.integers()),
            "icon": st.one_of(st.none(), st.text())}
    )
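
Since fixed_dictionaries generates dicts with exactly these keys, a short usage sketch (hypothetical test name):

# Sketch: every generated host document carries the full fixed key set.
from hypothesis import given

@given(host_json())
def test_host_json_has_fixed_keys(host):
    assert {"metadata", "name", "ip", "owned", "hostnames"} <= set(host)
    assert isinstance(host["hostnames"], list)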
Example #19
def test_flatmap_retrieve_from_db():
    constant_float_lists = strategy(floats(0, 1)).flatmap(
        lambda x: lists(just(x))
    )

    track = []

    db = ExampleDatabase()

    @given(constant_float_lists, settings=Settings(database=db))
    def record_and_test_size(xs):
        track.append(xs)
        assert sum(xs) < 1

    with pytest.raises(AssertionError):
        record_and_test_size()

    assert track
    example = track[-1]

    while track:
        track.pop()

    with pytest.raises(AssertionError):
        record_and_test_size()

    assert track[0] == example
Example #20
def python_number(draw, min_val, max_val):
    return draw(st.one_of(st.floats(min_val,
                                    max_val,
                                    allow_nan=False,
                                    allow_infinity=False),
                          st.integers(min_val,
                                      max_val)))
Example #21
def _dense_features_map(draw, num_records, **kwargs):
    float_lengths = draw(
        st.lists(
            st.integers(min_value=1, max_value=10),
            min_size=num_records,
            max_size=num_records
        )
    )

    total_length = sum(float_lengths)

    float_keys = draw(
        st.lists(
            st.integers(min_value=1, max_value=100),
            min_size=total_length,
            max_size=total_length,
            unique=True
        )
    )

    float_values = draw(
        st.lists(st.floats(),
                 min_size=total_length,
                 max_size=total_length)
    )

    return [float_lengths, float_keys, float_values]
Example #22
def test_floats_in_tiny_interval_within_bounds(data, center):
    assume(not (math.isinf(next_down(center)) or math.isinf(next_up(center))))
    lo = Decimal.from_float(next_down(center)).next_plus()
    hi = Decimal.from_float(next_up(center)).next_minus()
    assert float(lo) < lo < center < hi < float(hi)
    val = data.draw(st.floats(lo, hi))
    assert lo < val < hi
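
next_down and next_up step to the adjacent representable doubles; since Python 3.9 the same stepping is available as math.nextafter, as a quick sketch:

# Sketch: math.nextafter exposes the adjacent-float stepping used above.
import math

center = 1.0
assert math.nextafter(center, math.inf) == center + 2 ** -52
assert math.nextafter(center, -math.inf) == center - 2 ** -53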
Example #23
def matches(draw, strategies=axelrod.strategies,
            min_turns=1, max_turns=200,
            min_noise=0, max_noise=1):
    """
    A hypothesis decorator to return a random match as well as a random seed (to
    ensure reproducibility when instances of a class need the random library).

    Parameters
    ----------
    strategies : list
        The strategies from which to sample the two players
    min_turns : integer
        The minimum number of turns
    max_turns : integer
        The maximum number of turns
    min_noise : float
        The minimum noise
    max_noise : float
        The maximum noise

    Returns
    -------
    tuple : a random match as well as a random seed
    """
    seed = draw(random_module())
    strategies = draw(strategy_lists(min_size=2, max_size=2))
    players = [s() for s in strategies]
    turns = draw(integers(min_value=min_turns, max_value=max_turns))
    noise = draw(floats(min_value=min_noise, max_value=max_noise))
    match = axelrod.Match(players, turns=turns, noise=noise)
    return match, seed
Example #24
def test_range_check_returns_range_as_is_if_first_is_less_than_second(x, d):
    # Pull data such that the first is less than the second.
    if isinstance(x, float):
        y = d.draw(floats(min_value=x + 1.0, max_value=1E9, allow_nan=False))
    else:
        y = d.draw(integers(min_value=x + 1))
    assert range_check(x, y) == (x, y)
Example #25
def matches(
    draw,
    strategies=short_run_time_strategies,
    min_turns=1,
    max_turns=200,
    min_noise=0,
    max_noise=1,
):
    """
    A hypothesis decorator to return a random match.

    Parameters
    ----------
    strategies : list
        The strategies from which to sample the two players
    min_turns : integer
        The minimum number of turns
    max_turns : integer
        The maximum number of turns
    min_noise : float
        The minimum noise
    max_noise : float
        The maximum noise

    Returns
    -------
    match : a random match
    """
    strategies = draw(strategy_lists(min_size=2, max_size=2))
    players = [s() for s in strategies]
    turns = draw(integers(min_value=min_turns, max_value=max_turns))
    noise = draw(floats(min_value=min_noise, max_value=max_noise))
    match = Match(players, turns=turns, noise=noise)
    return match
Example #26
def test_small_sum_lists():
    xs = minimal(
        lists(floats()),
        lambda x: len(x) >= 100 and sum(t for t in x if float("inf") > t >= 0) >= 1,
        settings=Settings(average_list_length=200),
    )
    assert 1.0 <= sum(t for t in xs if t >= 0) <= 1.5
Example #27
def test_may_fill_with_nan_when_unique_is_set():
    find_any(
        nps.arrays(
            dtype=float, elements=st.floats(allow_nan=False), shape=10,
            unique=True, fill=st.just(float('nan'))),
        lambda x: np.isnan(x).any()
    )
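
The behaviour being exercised: unique=True constrains drawn elements, while the fill value is exempt, so a NaN fill can appear even though elements forbid NaN. A sketch of the complementary guarantee:

# Sketch: drawn (non-fill) values stay unique even when the fill is NaN.
import numpy as np
import hypothesis.strategies as st
import hypothesis.extra.numpy as nps
from hypothesis import given

@given(nps.arrays(dtype=float, elements=st.floats(allow_nan=False), shape=10,
                  unique=True, fill=st.just(float('nan'))))
def test_non_fill_values_are_unique(x):
    drawn = x[~np.isnan(x)]
    assert len(set(drawn.tolist())) == len(drawn)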
Example #28
def _tensor_splits(draw):
    lengths = draw(st.lists(st.integers(1, 5), min_size=1, max_size=10))
    batch_size = draw(st.integers(1, 5))
    element_pairs = [
        (batch, r) for batch in range(batch_size) for r in range(len(lengths))
    ]
    perm = draw(st.permutations(element_pairs))
    perm = perm[:-1]  # skip one range
    ranges = [[(0, 0)] * len(lengths) for _ in range(batch_size)]
    offset = 0
    for pair in perm:
        ranges[pair[0]][pair[1]] = (offset, lengths[pair[1]])
        offset += lengths[pair[1]]

    data = draw(st.lists(
        st.floats(min_value=-1.0, max_value=1.0),
        min_size=offset,
        max_size=offset
    ))

    key = draw(st.permutations(range(offset)))

    return (
        np.array(data).astype(np.float32), np.array(ranges),
        np.array(lengths), np.array(key).astype(np.int64)
    )
Example #29
def test_flatmap_retrieve_from_db():
    constant_float_lists = floats(0, 1).flatmap(
        lambda x: lists(just(x))
    )

    track = []

    db = ExampleDatabase()

    @given(constant_float_lists)
    @settings(database=db)
    def record_and_test_size(xs):
        if sum(xs) >= 1:
            track.append(xs)
            assert False

    with pytest.raises(AssertionError):
        record_and_test_size()

    assert track
    example = track[-1]
    track = []

    with pytest.raises(AssertionError):
        record_and_test_size()

    assert track[0] == example
Example #30
@st.deferred  # deferred definition lets the strategy refer to itself below
def reusable():
    return st.one_of(
        st.sampled_from(base_reusable_strategies),

        st.builds(
            st.floats, min_value=st.none() | st.floats(),
            max_value=st.none() | st.floats(), allow_infinity=st.booleans(),
            allow_nan=st.booleans()
        ),

        st.builds(st.just, st.lists(max_size=0)),
        st.builds(st.sampled_from, st.lists(st.lists(max_size=0))),

        st.lists(reusable).map(st.one_of),
        st.lists(reusable).map(lambda ls: st.tuples(*ls)),
    )
Example #31
class TestCrossEntropyOps(hu.HypothesisTestCase):
    @given(
        inputs=st.lists(
            elements=st.integers(min_value=1, max_value=5),
            min_size=1,
            max_size=2,
            average_size=2,
        ).flatmap(
            lambda shape: st.tuples(
                hu.arrays(
                    dims=shape,
                    elements=st.one_of(
                        st.floats(min_value=-1.0, max_value=-0.1),
                        st.floats(min_value=0.1, max_value=1.0),
                    )),
                hu.arrays(
                    dims=shape,
                    elements=st.sampled_from([0.0, 1.0]),
                ),
            )
        ),
    )
    def test_sigmoid_cross_entropy_with_logits(self, inputs):
        logits, targets = inputs

        def sigmoid_xentr_logit_ref(logits, targets):
            s = sigmoid_cross_entropy_with_logits(logits, targets)
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            m = fwd_targets - sigmoid(fwd_logits)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None)

        op = core.CreateOperator(
            'SigmoidCrossEntropyWithLogits',
            ['logits', 'targets'],
            ['xentropy'])
        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            [logits, targets],
            sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)

    @given(n=st.integers(2, 10),
           b=st.integers(1, 5),
           **hu.gcs_cpu_only)
    def test_soft_label_cross_entropy(self, n, b, gc, dc):
        # Initialize X and add 1e-2 for numerical stability
        X = np.random.rand(b, n).astype(np.float32)
        X = X + 1e-2
        for i in range(b):
            X[i] = X[i] / np.sum(X[i])

        # Initialize label
        label = np.random.rand(b, n).astype(np.float32)
        for i in range(b):
            label[i] = label[i] / np.sum(label[i])

        # Reference implementation of cross entropy with soft labels
        def soft_label_xentr_ref(X, label):
            # np.sum over a generator is deprecated; pass a list instead
            xent = [np.sum([-label[j][i] * np.log(max(X[j][i], 1e-20))
                            for i in range(len(X[0]))]) for j in range(b)]
            return (xent,)

        op = core.CreateOperator("CrossEntropy", ["X", "label"], ["Y"])

        # TODO(surya) Once CrossEntropyOp is ported to GPU, add the respective
        # tests to this unit test.
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X, label],
            reference=soft_label_xentr_ref,
        )

        self.assertGradientChecks(
            gc, op, [X, label], 0, [0], stepsize=1e-4, threshold=1e-2)
Example #32
import numpy as np

from hypothesis import assume, given, settings
from hypothesis.strategies import floats
from nxviz.polcart import (
    to_cartesian,
    to_degrees,
    to_polar,
    to_proper_degrees,
    to_proper_radians,
    to_radians,
)


@settings(perform_health_check=False)
@given(floats(), floats())
def test_convert_xy(x, y):
    assume(x != 0 and y != 0)
    assume(np.isfinite(x) and np.isfinite(y))
    assume(abs(x) < 1E6 and abs(y) < 1E6)
    assume(abs(x) > 0.01 and abs(y) > 0.01)

    # Test radians
    r, theta = to_polar(x, y)
    x_new, y_new = to_cartesian(r, theta)
    assert np.allclose(x, x_new)
    assert np.allclose(y, y_new)

    # Test degrees
    r, theta = to_polar(x, y, theta_units="degrees")
    x_new, y_new = to_cartesian(r, theta, theta_units="degrees")
    assert np.allclose(x, x_new)
    assert np.allclose(y, y_new)
Example #33
class TestCrossEntropyOps(hu.HypothesisTestCase):
    @given(
        inputs=st.lists(
            elements=st.integers(min_value=1, max_value=5),
            min_size=1,
            max_size=2,
            average_size=2,
        ).flatmap(
            lambda shape: st.tuples(
                hu.arrays(
                    dims=shape,
                    elements=st.one_of(
                        st.floats(min_value=-1.0, max_value=-0.1),
                        st.floats(min_value=0.1, max_value=1.0),
                    )),
                hu.arrays(
                    dims=shape,
                    elements=st.sampled_from([0.0, 1.0]),
                ),
            )
        ),
        options=st.one_of(
            st.tuples(st.just(True), st.just(False)),
            st.tuples(st.just(False), st.just(True)),
            st.tuples(st.just(False), st.just(False))
        ),
        **hu.gcs
    )
    def test_sigmoid_cross_entropy_with_logits(
        self, inputs, options, gc, dc
    ):
        logits, targets = inputs
        log_D_trick, unjoined_lr_loss = options

        def sigmoid_xentr_logit_ref(logits, targets):
            if unjoined_lr_loss:
                s = unjoined_sigmoid_cross_entropy(logits, targets)
            else:
                s = (
                    sigmoid_cross_entropy_with_logits(logits, targets)
                    if not log_D_trick else
                    sigmoid_cross_entropy_with_logits_with_log_D_trick(
                        logits, targets
                    )
                )
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            if unjoined_lr_loss:
                m = unjoined_sigmoid_cross_entropy_grad(logits, targets)
            else:
                m = (
                    sigmoid_cross_entropy_with_logits_grad(fwd_logits, fwd_targets)
                    if not log_D_trick else
                    sigmoid_cross_entropy_with_logits_with_log_D_trick_grad(
                        fwd_logits, fwd_targets
                    )
                )
            # m = fwd_targets - sigmoid(fwd_logits)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None)

        op = core.CreateOperator(
            'SigmoidCrossEntropyWithLogits', ['logits', 'targets'],
            ['xentropy'],
            log_D_trick=log_D_trick,
            unjoined_lr_loss=unjoined_lr_loss
        )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets],
            reference=sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)

    @given(
        log_D_trick=st.just(False),
        **hu.gcs_cpu_only
    )
    def test_cross_entropy_and_unjoied_cross_entropy_relation(
        self, log_D_trick, gc, dc
    ):
        logits = np.array([1.4720, 0.3500, -0.6529, -1.1908, 0.8357,
                    -1.0774, -0.3395, -0.2469, 0.6708, -1.8332], dtype='f')
        targets = np.array([1., 1., 1., 1., 1., 1., 0., 0., 0., 0.], dtype='f')
        lr_size = targets.size
        unjoined_lr_loss = False

        def sigmoid_xentr_logit_ref(logits, targets):
            if unjoined_lr_loss:
                s = unjoined_sigmoid_cross_entropy(logits, targets)
            else:
                s = sigmoid_cross_entropy_with_logits(logits, targets)
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            if unjoined_lr_loss:
                m = unjoined_sigmoid_cross_entropy_grad(logits, targets)
            else:
                m = sigmoid_cross_entropy_with_logits_grad(
                    fwd_logits, fwd_targets)

            # m = fwd_targets - sigmoid(fwd_logits)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None)

        op = core.CreateOperator(
            'SigmoidCrossEntropyWithLogits', ['logits', 'targets'],
            ['xentropy'],
            log_D_trick=log_D_trick,
            unjoined_lr_loss=unjoined_lr_loss
        )
        output_lr = self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets],
            reference=sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)

        # Unjoined dataset where labels change later
        logits = np.array([1.4720, 0.3500, -0.6529, -1.1908, 0.8357,
                    -1.0774, -0.3395, -0.2469, 0.6708, -1.8332, 1.4720, 0.3500,
                    -0.6529, -1.1908, 0.8357, -1.0774], dtype='f')
        targets = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.,
                            0., 1., 1., 1., 1., 1., 1.], dtype='f')
        unjoined_lr_loss = True
        unjoined_lr_size = targets.size

        op = core.CreateOperator(
            'SigmoidCrossEntropyWithLogits', ['logits', 'targets'],
            ['xentropy'],
            log_D_trick=log_D_trick,
            unjoined_lr_loss=unjoined_lr_loss
        )
        outputs_unjoined_lr = self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets],
            reference=sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=sigmoid_xentr_logit_grad_ref)

        self.assertAlmostEqual(
            output_lr[0].item(0) * lr_size / unjoined_lr_size,
            outputs_unjoined_lr[0].item(0),
            delta=0.0001)

    @given(
        inputs=st.lists(
            elements=st.integers(min_value=1, max_value=5),
            min_size=1,
            max_size=2,
            average_size=2,
        ).flatmap(
            lambda shape: st.tuples(
                hu.arrays(
                    dims=shape,
                    elements=st.one_of(
                        st.floats(min_value=-1.0, max_value=-0.1),
                        st.floats(min_value=0.1, max_value=1.0),
                    )),
                hu.arrays(
                    dims=shape,
                    elements=st.sampled_from([0.0, 1.0]),
                ),
                hu.arrays(
                    dims=shape,
                    elements=st.floats(min_value=0.1, max_value=1.0),
                ),
            )
        ),
        **hu.gcs
    )
    def test_weighted_sigmoid_cross_entropy_with_logits(self, inputs, gc, dc):
        logits, targets, weights = inputs

        def weighted_sigmoid_xentr_logit_ref(logits, targets, weights):
            s = sigmoid_cross_entropy_with_logits(logits, targets)
            s = np.multiply(s, weights)
            m = np.mean(s, axis=len(logits.shape) - 1)
            return (m, )

        def weighted_sigmoid_xentr_logit_grad_ref(g_out, outputs, fwd_inputs):
            fwd_logits, fwd_targets, fwd_weights = fwd_inputs
            inner_size = fwd_logits.shape[-1]
            m = fwd_targets - sigmoid(fwd_logits)
            m = np.multiply(m, weights)
            g_in = -np.expand_dims(g_out, axis=-1) * m / inner_size
            return (g_in, None, None)

        op = core.CreateOperator(
            'WeightedSigmoidCrossEntropyWithLogits',
            ['logits', 'targets', 'weights'],
            ['xentropy'])
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[logits, targets, weights],
            reference=weighted_sigmoid_xentr_logit_ref,
            output_to_grad='xentropy',
            grad_reference=weighted_sigmoid_xentr_logit_grad_ref)

    @given(n=st.integers(2, 10),
           b=st.integers(1, 5),
           **hu.gcs_cpu_only)
    def test_soft_label_cross_entropy(self, n, b, gc, dc):
        # Initialize X and add 1e-2 for numerical stability
        X = np.random.rand(b, n).astype(np.float32)
        X = X + 1e-2
        for i in range(b):
            X[i] = X[i] / np.sum(X[i])

        # Initialize label
        label = np.random.rand(b, n).astype(np.float32)
        for i in range(b):
            label[i] = label[i] / np.sum(label[i])

        # Reference implementation of cross entropy with soft labels
        def soft_label_xentr_ref(X, label):
            xent = [np.sum([-label[j][i] * np.log(max(X[j][i], 1e-20))
                            for i in range(len(X[0]))]) for j in range(b)]
            return (xent,)

        op = core.CreateOperator("CrossEntropy", ["X", "label"], ["Y"])

        # TODO(surya) Once CrossEntropyOp is ported to GPU, add the respective
        # tests to this unit test.
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X, label],
            reference=soft_label_xentr_ref,
        )

        self.assertGradientChecks(
            gc, op, [X, label], 0, [0], stepsize=1e-4, threshold=1e-2)
Example #34
from hypothesis.strategies import just, none, sets, text, basic, lists, \
    binary, builds, floats, one_of, tuples, randoms, booleans, decimals, \
    integers, composite, fractions, recursive, streaming, frozensets, \
    dictionaries, sampled_from, complex_numbers, fixed_dictionaries
from hypothesis.strategytests import mutate_basic, templates_for, \
    strategy_test_suite
from hypothesis.internal.compat import hrange, OrderedDict
from hypothesis.searchstrategy.morphers import MorpherStrategy
from hypothesis.searchstrategy.narytree import n_ary_tree

with Settings(average_list_length=5.0):
    TestIntegerRange = strategy_test_suite(integers(min_value=0, max_value=5))
    TestGiantIntegerRange = strategy_test_suite(
        integers(min_value=(-(2 ** 129)), max_value=(2 ** 129))
    )
    TestFloatRange = strategy_test_suite(floats(min_value=0.5, max_value=10))
    TestSampled10 = strategy_test_suite(sampled_from(elements=list(range(10))))
    TestSampled1 = strategy_test_suite(sampled_from(elements=(1,)))
    TestSampled2 = strategy_test_suite(sampled_from(elements=(1, 2)))

    TestIntegersFrom = strategy_test_suite(integers(min_value=13))
    TestBigIntegersFrom = strategy_test_suite(integers(min_value=1 << 1024))

    TestOneOf = strategy_test_suite(one_of(
        integers(), integers(), booleans()))

    TestOneOfSameType = strategy_test_suite(
        one_of(
            integers(min_value=1, max_value=10),
            integers(min_value=8, max_value=15),
        )
    )
Example #35
    assert x + y == y + x


@fails
@given(binary(), binary())
def test_bytes_addition_is_commutative(x, y):
    assert x + y == y + x


@given(integers(), integers(), integers())
def test_int_addition_is_associative(x, y, z):
    assert x + (y + z) == (x + y) + z


@fails
@given(floats(), floats(), floats())
@settings(max_examples=2000,)
def test_float_addition_is_associative(x, y, z):
    assert x + (y + z) == (x + y) + z


@given(lists(integers()))
def test_reversing_preserves_integer_addition(xs):
    assert sum(xs) == sum(reversed(xs))


def test_still_minimizes_on_non_assertion_failures():
    @settings(max_examples=50)
    @given(integers())
    def is_not_too_large(x):
        if x >= 10:
Example #36
class SimulatorsActAsFactory(RuleBasedStateMachine):
    def __init__(self):
        super().__init__()
        products, profiles = sample_products, sample_profiles
        self.slow_simulator = SlowFactorySimulator(
            initial_wallet=initial_wallet,
            initial_storage=initial_storage,
            n_steps=n_steps,
            n_products=len(products),
            profiles=profiles,
            max_storage=max_storage,
        )
        self.fast_simulator = FastFactorySimulator(
            initial_wallet=initial_wallet,
            initial_storage=initial_storage,
            n_steps=n_steps,
            n_products=len(products),
            profiles=profiles,
            max_storage=max_storage,
        )
        self.factory = Factory(
            initial_wallet=initial_wallet,
            initial_storage=initial_storage,
            profiles=profiles,
            max_storage=max_storage,
        )
        self.profiles = self.factory.profiles
        self._payments = defaultdict(list)
        self._transports = defaultdict(list)

    profile_indices = Bundle("profile_indices")
    payments = Bundle("payments")
    times = Bundle("times")
    transports = Bundle("transports")

    @rule(target=profile_indices, k=st.integers(0, len(sample_profiles) - 1))
    def choose_profile(self, k):
        return k

    @rule(target=payments, payment=st.floats(-30, 30))
    def choose_payment(self, payment):
        return payment

    @rule(target=times, t=st.integers(0, n_steps))
    def choose_time(self, t):
        return t

    @rule(target=transports, t=storage())
    def choose_transport(self, t):
        return t

    @rule(profile_index=profile_indices, t=times)
    def run_profile(self, profile_index, t):
        job = Job(
            profile=profile_index,
            time=t,
            line=self.profiles[profile_index].line,
            action="run",
            contract=None,
            override=False,
        )
        end = t + self.profiles[profile_index].n_steps
        if end > n_steps - 1:
            return
        self.factory.schedule(job=job, override=False)
        self.slow_simulator.schedule(job=job, override=False)
        self.fast_simulator.schedule(job=job, override=False)

    @rule(payment=payments, t=times)
    def pay(self, payment, t):
        self._payments[t].append(payment)
        self.slow_simulator.pay(payment, t)
        self.fast_simulator.pay(payment, t)

    @rule(trans=transports, t=times)
    def transport(self, trans, t):
        p, q = trans
        self._transports[t].append(trans)
        self.slow_simulator.transport_to(p, q, t)
        self.fast_simulator.transport_to(p, q, t)

    @rule()
    def run_and_test(self):
        for _ in range(n_steps):
            for payment in self._payments[_]:
                self.factory.pay(payment)
            for p, q in self._transports[_]:
                self.factory.transport_to(p, q)
            self.factory.step()
            assert (self.slow_simulator.wallet_at(_) ==
                    self.fast_simulator.wallet_at(_) == self.factory.wallet)
            assert (self.slow_simulator.balance_at(_) ==
                    self.fast_simulator.balance_at(_) == self.factory.balance)
            assert np.all(
                self.slow_simulator.storage_at(_) ==
                self.fast_simulator.storage_at(_))
            assert np.all(
                self.slow_simulator.storage_at(_) == storage_as_array(
                    self.factory.storage, n_products=len(sample_products)))
            assert np.all(
                self.slow_simulator.line_schedules_at(_) ==
                self.factory.line_schedules)
            assert np.all(
                self.fast_simulator.line_schedules_at(_) ==
                self.factory.line_schedules)
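
To have a test runner actually execute the machine, Hypothesis's standard stateful-testing hook is to expose the generated TestCase:

# Standard way to run a RuleBasedStateMachine under pytest/unittest:
TestSimulatorsActAsFactory = SimulatorsActAsFactory.TestCase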
Example #37
def schur_complements(draw,
                      batch_shape=None,
                      event_dim=None,
                      feature_dim=None,
                      feature_ndims=None,
                      enable_vars=None,
                      depth=None):
    """Strategy for drawing `SchurComplement` kernels.

  The underlying kernel is drawn from the `kernels` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound kernel.

  Returns:
    kernels: A strategy for drawing `SchurComplement` kernels with the specified
      `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=2, max_value=6))

    base_kernel, kernel_variable_names = draw(
        kernels(batch_shape=batch_shape,
                event_dim=event_dim,
                feature_dim=feature_dim,
                feature_ndims=feature_ndims,
                enable_vars=False,
                depth=depth - 1))

    # SchurComplement requires the inputs to have one example dimension.
    fixed_inputs = draw(
        kernel_input(batch_shape=batch_shape,
                     example_ndims=1,
                     feature_dim=feature_dim,
                     feature_ndims=feature_ndims))
    # Positive shift to ensure the divisor matrix is PD.
    diag_shift = np.float64(
        draw(
            hpnp.arrays(dtype=np.float64,
                        shape=tensorshape_util.as_list(batch_shape),
                        elements=hps.floats(1,
                                            100,
                                            allow_nan=False,
                                            allow_infinity=False))))

    hp.note('Forming SchurComplement kernel with fixed_inputs: {} '
            'and diag_shift: {}'.format(fixed_inputs, diag_shift))

    schur_complement_params = {
        'fixed_inputs': fixed_inputs,
        'diag_shift': diag_shift
    }
    for param_name in schur_complement_params:
        if enable_vars and draw(hps.booleans()):
            kernel_variable_names.append(param_name)
            schur_complement_params[param_name] = tf.Variable(
                schur_complement_params[param_name], name=param_name)
            if draw(hps.booleans()):
                schur_complement_params[
                    param_name] = tfp_hps.defer_and_count_usage(
                        schur_complement_params[param_name])
    result_kernel = tfp.math.psd_kernels.SchurComplement(
        base_kernel=base_kernel,
        fixed_inputs=schur_complement_params['fixed_inputs'],
        diag_shift=schur_complement_params['diag_shift'],
        validate_args=True)
    return result_kernel, kernel_variable_names
Example #38
from newton2 import newton2
import numpy as np
import pytest
from hypothesis import given
import hypothesis.strategies as st


@given(st.floats(-10, 10), st.floats(-10, 10))
def test_unique_solution(x0, y0):
    ''' 
    tests if the algorithm correctly finds that the unique zero of the system
      f(x,y) = x = 0
      g(x,y) = y = 0
    is (x,y)=(0,0). The strategy is to use different initial guesses for both x and y.
    '''
    def f1(x, y):
        return x

    def f2(x, y):
        return y

    def f(x, y):
        return [f1(x, y), f2(x, y)]

    def Jf(x, y):
        return [[1, 0], [0, 1]]

    p0 = [x0, y0]
    xn, yn = newton2(f, Jf, p0).x
    assert round(xn, 6) == 0
    assert round(yn, 6) == 0
Example #39
def kernel_input(draw,
                 batch_shape,
                 example_dim=None,
                 example_ndims=None,
                 feature_dim=None,
                 feature_ndims=None,
                 enable_vars=False,
                 name=None):
    """Strategy for drawing arbitrary Kernel input.

  In order to avoid duplicates (or even numerically near-duplicates), we
  generate inputs on a grid. We let hypothesis generate the number of grid
  points and distance between grid points, within some reasonable pre-defined
  ranges. The result will be a batch of example sets, within which each set of
  examples has no duplicates (but no such duplication avoidance is applied
  across batches).

  Args:
    draw: Hypothesis function supplied by `@hps.composite`.
    batch_shape: `TensorShape`. The batch shape of the resulting
      kernel input.
    example_dim: Optional Python int giving the size of each example dimension.
      If omitted, Hypothesis will choose one.
    example_ndims: Optional Python int giving the number of example dimensions
      of the input. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: If `False`, the returned parameters are all Tensors, never
      Variables or DeferredTensor.
    name: Name to give the variable.

  Returns:
    kernel_input: A strategy for drawing kernel_input with the prescribed shape
      (or an arbitrary one if omitted).
  """
    if example_ndims is None:
        example_ndims = draw(hps.integers(min_value=1, max_value=2))
    if example_dim is None:
        example_dim = draw(hps.integers(min_value=2, max_value=4))

    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=1, max_value=2))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=4))

    batch_shape = tensorshape_util.as_list(batch_shape)
    example_shape = [example_dim] * example_ndims
    feature_shape = [feature_dim] * feature_ndims

    batch_size = int(np.prod(batch_shape))
    example_size = example_dim**example_ndims
    feature_size = feature_dim**feature_ndims

    # We would like each batch of examples to be unique, to avoid computing kernel
    # matrices that are semi-definite. hypothesis.extra.numpy.arrays doesn't have
    # a sense of tolerance, so we need to do some extra work to get points
    # sufficiently far from each other.
    grid_size = draw(hps.integers(min_value=10, max_value=100))
    grid_spacing = draw(hps.floats(min_value=1e-2, max_value=2))
    hp.note('Grid size {} and spacing {}'.format(grid_size, grid_spacing))

    def _grid_indices_to_values(grid_indices):
        return (
            grid_spacing *
            (np.array(grid_indices, dtype=np.float64) - np.float64(grid_size)))

    # We'll construct the result by stacking onto flattened batch, example and
    # feature dims, then reshape to unflatten at the end.
    result = np.zeros([0, example_size, feature_size])
    for _ in range(batch_size):
        seen = set()
        index_array_strategy = hps.tuples(
            *([hps.integers(0, grid_size + 1)] * feature_size)).filter(
                lambda x, seen=seen: x not in seen
            )  # Default param to sate pylint.
        examples = np.zeros([1, 0, feature_size])
        for _ in range(example_size):
            feature_grid_locations = draw(index_array_strategy)
            seen.add(feature_grid_locations)
            example = _grid_indices_to_values(feature_grid_locations)
            example = example[np.newaxis, np.newaxis, ...]
            examples = np.concatenate([examples, example], axis=1)
        result = np.concatenate([result, examples], axis=0)
    result = np.reshape(result, batch_shape + example_shape + feature_shape)

    if enable_vars and draw(hps.booleans()):
        result = tf.Variable(result, name=name)
        if draw(hps.booleans()):
            result = tfp_hps.defer_and_count_usage(result)
    return result
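
The grid trick described in the docstring is easy to verify in isolation: distinct integer grid indices map to points at least one grid step apart, so near-duplicates cannot occur. A standalone sketch:

# Standalone sketch of the docstring's grid argument.
import numpy as np

grid_spacing = 0.5
indices = np.array([(0, 1), (2, 3), (4, 0)], dtype=np.float64)  # distinct tuples
points = grid_spacing * indices
diffs = points[:, None, :] - points[None, :, :]
dists = np.sqrt((diffs ** 2).sum(axis=-1))
off_diagonal = ~np.eye(len(points), dtype=bool)
assert (dists[off_diagonal] >= grid_spacing).all()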
Example #40
from __future__ import absolute_import, division, print_function

import gc
import weakref

import pytest

import hypothesis.strategies as st
from hypothesis import given, settings


@pytest.mark.parametrize(
    "s",
    [
        st.floats(),
        st.tuples(st.integers()),
        st.tuples(),
        st.one_of(st.integers(), st.text()),
    ],
)
def test_is_cacheable(s):
    assert s.is_cacheable


@pytest.mark.parametrize(
    "s",
    [
        st.just([]),
        st.tuples(st.integers(), st.just([])),
        st.one_of(st.integers(), st.text(), st.just([])),
    ],
)
def test_is_not_cacheable(s):
    assert not s.is_cacheable
Example #41
    def sample_program_configs(self, draw):
        in_shape = draw(
            st.lists(st.integers(min_value=1, max_value=32),
                     min_size=4,
                     max_size=4))
        is_test_val = draw(st.sampled_from([True, False]))
        epsilon = draw(st.floats(min_value=0.00001, max_value=0.001))
        momentum = draw(st.floats(min_value=0.1, max_value=0.9))

        def generate_input(*args, **kwargs):
            return np.random.random(in_shape).astype(np.float32)

        def generate_scale(*args, **kwargs):
            return np.random.random([in_shape[1]]).astype(np.float32) + 0.5

        def generate_bias(*args, **kwargs):
            return np.random.random([in_shape[1]]).astype(np.float32)

        def generate_mean(*args, **kwargs):
            return np.random.random([in_shape[1]]).astype(np.float32)

        def generate_variance(*args, **kwargs):
            return np.random.random([in_shape[1]]).astype(np.float32)

        outputs = [
            "output_data", "mean_data", "variance_data", "saved_mean",
            "saved_variance"
        ]
        if self.get_target() == "Metal":
            outputs = ["output_data"]

        batch_norm_ops = OpConfig(type="batch_norm",
                                  inputs={
                                      "X": ["input_data"],
                                      "Scale": ["scale_data"],
                                      "Bias": ["bias_data"],
                                      "Mean": ["mean_data"],
                                      "Variance": ["variance_data"]
                                  },
                                  outputs={
                                      "Y": ["output_data"],
                                      "MeanOut": ["mean_data"],
                                      "VarianceOut": ["variance_data"],
                                      "SavedMean": ["saved_mean"],
                                      "SavedVariance": ["saved_variance"]
                                  },
                                  attrs={
                                      "is_test": False,
                                      "trainable_statistics": False,
                                      "data_layout": "NCHW",
                                      "use_global_stats": True,
                                      "epsilon": epsilon,
                                      "momentum": momentum
                                  })
        program_config = ProgramConfig(
            ops=[batch_norm_ops],
            weights={},
            inputs={
                "input_data": TensorConfig(data_gen=partial(generate_input)),
                "scale_data": TensorConfig(data_gen=partial(generate_scale)),
                "bias_data": TensorConfig(data_gen=partial(generate_bias)),
                "mean_data": TensorConfig(data_gen=partial(generate_mean)),
                "variance_data":
                TensorConfig(data_gen=partial(generate_variance)),
            },
            outputs=outputs)
        return program_config
Example #42
class QCircuitMachine(RuleBasedStateMachine):
    """Build a Hypothesis rule based state machine for constructing, transpiling
    and simulating a series of random QuantumCircuits.

    Build circuits with up to QISKIT_RANDOM_QUBITS qubits, apply a random
    selection of gates from qiskit.extensions.standard with randomly selected
    qargs, cargs, and parameters. At random intervals, transpile the circuit for
    a random backend with a random optimization level and simulate both the
    initial and the transpiled circuits to verify that their counts are the
    same.

    """

    qubits = Bundle('qubits')
    clbits = Bundle('clbits')

    backend = Aer.get_backend('qasm_simulator')
    max_qubits = int(backend.configuration().n_qubits / 2)

    def __init__(self):
        super().__init__()
        self.qc = QuantumCircuit()

    @precondition(lambda self: len(self.qc.qubits) < self.max_qubits)
    @rule(target=qubits, n=st.integers(min_value=1, max_value=max_qubits))
    def add_qreg(self, n):
        """Adds a new variable sized qreg to the circuit, up to max_qubits."""
        n = min(n, self.max_qubits - len(self.qc.qubits))
        qreg = QuantumRegister(n)
        self.qc.add_register(qreg)
        return multiple(*list(qreg))

    @rule(target=clbits, n=st.integers(1, 5))
    def add_creg(self, n):
        """Add a new variable sized creg to the circuit."""
        creg = ClassicalRegister(n)
        self.qc.add_register(creg)
        return multiple(*list(creg))

    # Gates of various shapes

    @rule(gate=st.sampled_from(oneQ_gates), qarg=qubits)
    def add_1q_gate(self, gate, qarg):
        """Append a random 1q gate on a random qubit."""
        self.qc.append(gate(), [qarg], [])

    @rule(gate=st.sampled_from(twoQ_gates),
          qargs=st.lists(qubits, max_size=2, min_size=2, unique=True))
    def add_2q_gate(self, gate, qargs):
        """Append a random 2q gate across two random qubits."""
        self.qc.append(gate(), qargs)

    @rule(gate=st.sampled_from(threeQ_gates),
          qargs=st.lists(qubits, max_size=3, min_size=3, unique=True))
    def add_3q_gate(self, gate, qargs):
        """Append a random 3q gate across three random qubits."""
        self.qc.append(gate(), qargs)

    @rule(gate=st.sampled_from(oneQ_oneP_gates),
          qarg=qubits,
          param=st.floats(allow_nan=False,
                          allow_infinity=False,
                          min_value=-10 * pi,
                          max_value=10 * pi))
    def add_1q1p_gate(self, gate, qarg, param):
        """Append a random 1q gate with 1 random float parameter."""
        self.qc.append(gate(param), [qarg])

    @rule(gate=st.sampled_from(oneQ_twoP_gates),
          qarg=qubits,
          params=st.lists(st.floats(allow_nan=False,
                                    allow_infinity=False,
                                    min_value=-10 * pi,
                                    max_value=10 * pi),
                          min_size=2,
                          max_size=2))
    def add_1q2p_gate(self, gate, qarg, params):
        """Append a random 1q gate with 2 random float parameters."""
        self.qc.append(gate(*params), [qarg])

    @rule(gate=st.sampled_from(oneQ_threeP_gates),
          qarg=qubits,
          params=st.lists(st.floats(allow_nan=False,
                                    allow_infinity=False,
                                    min_value=-10 * pi,
                                    max_value=10 * pi),
                          min_size=3,
                          max_size=3))
    def add_1q3p_gate(self, gate, qarg, params):
        """Append a random 1q gate with 3 random float parameters."""
        self.qc.append(gate(*params), [qarg])

    @rule(gate=st.sampled_from(twoQ_oneP_gates),
          qargs=st.lists(qubits, max_size=2, min_size=2, unique=True),
          param=st.floats(allow_nan=False,
                          allow_infinity=False,
                          min_value=-10 * pi,
                          max_value=10 * pi))
    def add_2q1p_gate(self, gate, qargs, param):
        """Append a random 2q gate with 1 random float parameter."""
        self.qc.append(gate(param), qargs)

    @rule(gate=st.sampled_from(twoQ_threeP_gates),
          qargs=st.lists(qubits, max_size=2, min_size=2, unique=True),
          params=st.lists(st.floats(allow_nan=False,
                                    allow_infinity=False,
                                    min_value=-10 * pi,
                                    max_value=10 * pi),
                          min_size=3,
                          max_size=3))
    def add_2q3p_gate(self, gate, qargs, params):
        """Append a random 2q gate with 3 random float parameters."""
        self.qc.append(gate(*params), qargs)

    @rule(gate=st.sampled_from(oneQ_oneC_gates), qarg=qubits, carg=clbits)
    def add_1q1c_gate(self, gate, qarg, carg):
        """Append a random 1q, 1c gate."""
        self.qc.append(gate(), [qarg], [carg])

    @rule(gate=st.sampled_from(variadic_gates),
          qargs=st.lists(qubits, min_size=1, unique=True))
    def add_variQ_gate(self, gate, qargs):
        """Append a gate with a variable number of qargs."""
        self.qc.append(gate(len(qargs)), qargs)

    @precondition(lambda self: len(self.qc.data) > 0)
    @rule(carg=clbits, data=st.data())
    def add_c_if_last_gate(self, carg, data):
        """Modify the last gate to be conditional on a classical register."""
        creg = carg.register
        val = data.draw(st.integers(min_value=0, max_value=2**len(creg) - 1))

        last_gate = self.qc.data[-1]

        # c_if can only be attached to Gate instances, so skip measurements
        # and barriers.
        assume(isinstance(last_gate[0], Gate))

        last_gate[0].c_if(creg, val)

    # Properties to check

    @invariant()
    def qasm(self):
        """After each circuit operation, it should be possible to build QASM."""
        self.qc.qasm()

    @precondition(
        lambda self: any(isinstance(d[0], Measure) for d in self.qc.data))
    @rule(backend=st.one_of(st.none(), st.sampled_from(mock_backends)),
          opt_level=st.integers(min_value=0, max_value=3))
    def equivalent_transpile(self, backend, opt_level):
        """Simulate, transpile and simulate the present circuit. Verify that the
        counts are not significantly different before and after transpilation.

        """

        print('Evaluating circuit at level {} on {}:\n{}'.format(
            opt_level, backend, self.qc.qasm()))

        assume(backend is None
               or backend.configuration().n_qubits >= len(self.qc.qubits))

        shots = 4096

        aer_counts = execute(self.qc, backend=self.backend,
                             shots=shots).result().get_counts()

        try:
            xpiled_qc = transpile(self.qc,
                                  backend=backend,
                                  optimization_level=opt_level)
        except Exception as e:
            failed_qasm = 'Exception caught during transpilation of circuit: \n{}'.format(
                self.qc.qasm())
            raise RuntimeError(failed_qasm) from e

        xpiled_aer_counts = execute(xpiled_qc,
                                    backend=self.backend,
                                    shots=shots).result().get_counts()

        count_differences = dicts_almost_equal(aer_counts, xpiled_aer_counts,
                                               0.05 * shots)

        assert count_differences == '', 'Counts not equivalent: {}\nFailing QASM: \n{}'.format(
            count_differences, self.qc.qasm())
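
# The machine above is only a description; Hypothesis executes it through the
# standard RuleBasedStateMachine.TestCase hook (the test name here is ours):
TestQuantumCircuits = QCircuitMachine.TestCase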
import minitorch
import pytest
from hypothesis import given
from numba import cuda
from hypothesis.strategies import floats, integers, lists, data, permutations
from .strategies import tensors, shaped_tensors, assert_close

small_floats = floats(min_value=-100, max_value=100, allow_nan=False)
v = 4.524423
one_arg = [
    ("neg", lambda a: -a),
    ("addconstant", lambda a: a + v),
    ("lt", lambda a: a < v),
    ("subconstant", lambda a: a - v),
    ("mult", lambda a: 5 * a),
    ("div", lambda a: a / v),
    ("sig", lambda a: a.sigmoid()),
    ("log", lambda a: (a + 100000).log()),
    ("relu", lambda a: (a + 2).relu()),
    ("exp", lambda a: (a - 200).exp()),
]

reduce = [
    ("sum", lambda a: a.sum()),
    ("mean", lambda a: a.mean()),
    ("sum2", lambda a: a.sum(0)),
    ("mean2", lambda a: a.mean(0)),
]
two_arg = [
    ("add", lambda a, b: a + b),
    ("mul", lambda a, b: a * b),
Example #44
0
class TestPairWiseLossOps(hu.HypothesisTestCase):
    @given(X=hu.arrays(dims=[2, 1],
                       elements=st.floats(min_value=0.0, max_value=10.0)),
           label=hu.arrays(dims=[2, 1],
                           elements=st.integers(min_value=0, max_value=1),
                           dtype=np.float32),
           **hu.gcs_cpu_only)
    def test_pair_wise_loss_predictions(self, X, label, gc, dc):
        workspace.FeedBlob('X', X)
        workspace.FeedBlob('label', label)
        new_label = np.array([label[1], label[0]])
        new_x = np.array([X[1], X[0]])
        workspace.FeedBlob('new_x', new_x)
        workspace.FeedBlob('new_label', new_label)
        net = core.Net('net')
        net.PairWiseLoss(['X', 'label'], ['output'])
        net.PairWiseLoss(['new_x', 'new_label'], ['new_output'])
        plan = core.Plan('predict_data')
        plan.AddStep(core.execution_step('predict_data',
                                         [net], num_iter=1))
        workspace.RunPlan(plan)
        output = workspace.FetchBlob('output')
        new_output = workspace.FetchBlob('new_output')
        sign = 1 if label[0] > label[1] else -1
        if label[0] == label[1]:
            self.assertEqual(np.asscalar(output), 0)
            return

        self.assertAlmostEqual(
            np.asscalar(output),
            np.asscalar(np.log(1 + np.exp(sign * (X[1] - X[0])))),
            delta=1e-4
        )
        # check swapping row order doesn't alter overall loss
        self.assertAlmostEqual(output, new_output)

    @given(X=hu.arrays(dims=[2, 1],
                       elements=st.floats(min_value=0.0, max_value=10.0)),
           label=hu.arrays(dims=[2, 1],
                           elements=st.integers(min_value=0, max_value=1),
                           dtype=np.float32),
           dY=hu.arrays(dims=[1],
                        elements=st.floats(min_value=1, max_value=10)),
           **hu.gcs_cpu_only)
    def test_pair_wise_loss_gradient(self, X, label, dY, gc, dc):
        workspace.FeedBlob('X', X)
        workspace.FeedBlob('dY', dY)
        workspace.FeedBlob('label', label)
        net = core.Net('net')
        net.PairWiseLossGradient(
            ['X', 'label', 'dY'],
            ['dX'],
        )
        plan = core.Plan('predict_data')
        plan.AddStep(core.execution_step('predict_data',
                                         [net], num_iter=1))
        workspace.RunPlan(plan)
        dx = workspace.FetchBlob('dX')
        sign = 1 if label[0] > label[1] else -1
        if label[0] == label[1]:
            self.assertEqual(np.asscalar(dx[0]), 0)
            return
        self.assertAlmostEqual(
            np.asscalar(dx[0]),
            np.asscalar(-dY[0] * sign / (1 + np.exp(sign * (X[0] - X[1])))),
            delta=1e-2 * abs(np.asscalar(dx[0])))

        self.assertEqual(np.asscalar(dx[0]), np.asscalar(-dx[1]))
        delta = 1e-3
        up_x = np.array([[X[0] + delta], [X[1]]], dtype=np.float32)
        down_x = np.array([[X[0] - delta], [X[1]]], dtype=np.float32)
        workspace.FeedBlob('up_x', up_x)
        workspace.FeedBlob('down_x', down_x)
        new_net = core.Net('new_net')
        new_net.PairWiseLoss(['up_x', 'label'], ['up_output'])
        new_net.PairWiseLoss(['down_x', 'label'], ['down_output'])

        plan = core.Plan('predict_data')
        plan.AddStep(core.execution_step('predict_data', [new_net], num_iter=1))
        workspace.RunPlan(plan)
        down_output_pred = workspace.FetchBlob('down_output')
        up_output_pred = workspace.FetchBlob('up_output')
        np.testing.assert_allclose(
            np.asscalar(dx[0]),
            np.asscalar(
                0.5 * dY[0] *
                (up_output_pred[0] - down_output_pred[0]) / delta),
            rtol=1e-2, atol=1e-2)
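
# For orientation, the closed form these two tests assert can be written as a
# tiny reference (the function name is ours; see the assertAlmostEqual calls
# above):
import numpy as np

def pair_wise_loss_ref(x0, x1, sign):
    # loss = log(1 + exp(sign * (x1 - x0))); its derivative w.r.t. x0 is
    # -sign / (1 + exp(sign * (x0 - x1))), matching the gradient test.
    return np.log1p(np.exp(sign * (x1 - x0)))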
Example #45
0
import pytest

from hypothesis import given, assume
from hypothesis.errors import UnsatisfiedAssumption
from hypothesis.strategies import integers, floats, one_of, just

from segpy.ibm_float import (ieee2ibm, ibm2ieee, MAX_IBM_FLOAT,
                             SMALLEST_POSITIVE_NORMAL_IBM_FLOAT,
                             LARGEST_NEGATIVE_NORMAL_IBM_FLOAT, MIN_IBM_FLOAT,
                             IBMFloat, EPSILON_IBM_FLOAT,
                             MAX_EXACT_INTEGER_IBM_FLOAT,
                             MIN_EXACT_INTEGER_IBM_FLOAT, EXPONENT_BIAS)

from segpy.util import almost_equal

ibm_compatible_negative_floats = floats(MIN_IBM_FLOAT,
                                        LARGEST_NEGATIVE_NORMAL_IBM_FLOAT)
ibm_compatible_positive_floats = floats(SMALLEST_POSITIVE_NORMAL_IBM_FLOAT,
                                        MAX_IBM_FLOAT)

ibm_compatible_non_negative_floats = one_of(
    just(0.0), floats(SMALLEST_POSITIVE_NORMAL_IBM_FLOAT, MAX_IBM_FLOAT))

ibm_compatible_non_positive_floats = one_of(
    just(0.0), floats(MIN_IBM_FLOAT, LARGEST_NEGATIVE_NORMAL_IBM_FLOAT))


def ibm_compatible_floats(min_f, max_f):
    truncated_min_f = max(min_f, MIN_IBM_FLOAT)
    truncated_max_f = min(max_f, MAX_IBM_FLOAT)

    strategies = []
Example #46
0
    value = attr.ib()  # type: object
    deferred = attr.ib(attr.Factory(Deferred))  # type: Deferred

    def resolve(self):
        """
        Resolve the L{Deferred} that represents the value with the
        value itself.
        """
        self.deferred.callback(self.value)


jsonAtoms = (st.none()
             | st.booleans()
             | st.integers()
             | st.floats(allow_nan=False)
             | st.text(printable))


def jsonComposites(children):
    """
    Creates a Hypothesis strategy that constructs composite
    JSON-serializable objects (e.g., lists).

    @param children: A strategy from which each composite object's
        children will be drawn.

    @return: The composite objects strategy.
    """
    return (st.lists(children)
            | st.dictionaries(st.text(printable), children))
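

# Presumably the two pieces are then combined into a strategy for arbitrarily
# nested JSON values with st.recursive; a sketch (the name is an assumption):
jsonObjects = st.recursive(jsonAtoms, jsonComposites)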
Example #47
0
                dom0.firewall("test-vm", value, "192.168.1.1", "1")


# ~~~ dsthost ~~~
@hypothesis.given(s.one_of(s.integers(), s.text(), s.functions()))
def test__firewall__dsthost__fuzz_negative(value):
    with patch("qsm.dom0.lib.run", return_value=None, autospec=True):
        with patch("qsm.dom0.exists_or_throws",
                   return_value=True,
                   autospec=True):
            with pytest.raises(AssertionError):
                dom0.firewall("test-vm", "accept", value, "1")


# ~~~ dst ports ~~~
@hypothesis.given(s.one_of(s.floats(), s.text(), s.functions()))
def test__firewall__dstports__invalid_type_fuzz_negative(value):
    """Test random types cause assertion error"""
    with patch("qsm.dom0.lib.run", return_value=None, autospec=True):
        with patch("qsm.dom0.exists_or_throws",
                   return_value=True,
                   autospec=True):
            with pytest.raises(AssertionError):
                dom0.firewall("test-vm", "accept", "192.168.1.1", value)


@hypothesis.given(s.integers(min_value=1, max_value=65535))
def test__firewall__dstports__fuzz_positive(value):
    """Test values inside of the acceptable range of ports"""
    with patch("qsm.dom0.lib.run", return_value=None, autospec=True):
        with patch("qsm.dom0.exists_or_throws",
def bijectors(draw,
              bijector_name=None,
              batch_shape=None,
              event_dim=None,
              enable_vars=False):
    """Strategy for drawing Bijectors.

  The emitted bijector may be a basic bijector or an `Invert` of a basic
  bijector, but not a compound like `Chain`.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    bijector_name: Optional Python `str`.  If given, the produced bijectors
      will all have this type.  If omitted, Hypothesis chooses one from
      the whitelist `TF2_FRIENDLY_BIJECTORS`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      bijector.  Hypothesis will pick one if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
      `tfp.util.TransformedVariable`}.

  Returns:
    bijectors: A strategy for drawing bijectors with the specified `batch_shape`
      (or an arbitrary one if omitted).
  """
    if bijector_name is None:
        bijector_name = draw(hps.sampled_from(TF2_FRIENDLY_BIJECTORS))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if bijector_name == 'Invert':
        underlying_name = draw(
            hps.sampled_from(sorted(set(TF2_FRIENDLY_BIJECTORS) - {'Invert'})))
        underlying = draw(
            bijectors(bijector_name=underlying_name,
                      batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars))
        return tfb.Invert(underlying, validate_args=True)
    if bijector_name == 'TransformDiagonal':
        underlying_name = draw(
            hps.sampled_from(sorted(TRANSFORM_DIAGONAL_WHITELIST)))
        underlying = draw(
            bijectors(bijector_name=underlying_name,
                      batch_shape=(),
                      event_dim=event_dim,
                      enable_vars=enable_vars))
        return tfb.TransformDiagonal(underlying, validate_args=True)
    if bijector_name == 'Inline':
        if enable_vars:
            scale = tf.Variable(1., name='scale')
        else:
            scale = 2.
        b = tfb.AffineScalar(scale=scale)

        inline = tfb.Inline(
            forward_fn=b.forward,
            inverse_fn=b.inverse,
            forward_log_det_jacobian_fn=lambda x: b.forward_log_det_jacobian(  # pylint: disable=g-long-lambda
                x,
                event_ndims=b.forward_min_event_ndims),
            forward_min_event_ndims=b.forward_min_event_ndims,
            is_constant_jacobian=b.is_constant_jacobian,
        )
        inline.b = b
        return inline
    if bijector_name == 'DiscreteCosineTransform':
        dct_type = draw(hps.integers(min_value=2, max_value=3))
        return tfb.DiscreteCosineTransform(validate_args=True,
                                           dct_type=dct_type)
    if bijector_name == 'PowerTransform':
        power = draw(hps.floats(min_value=0., max_value=10.))
        return tfb.PowerTransform(validate_args=True, power=power)
    if bijector_name == 'Permute':
        event_ndims = draw(hps.integers(min_value=1, max_value=2))
        axis = draw(hps.integers(min_value=-event_ndims, max_value=-1))
        permutation = draw(hps.permutations(np.arange(event_dim)))
        return tfb.Permute(permutation, axis=axis)

    bijector_params = draw(
        broadcasting_params(bijector_name,
                            batch_shape,
                            event_dim=event_dim,
                            enable_vars=enable_vars))
    ctor = getattr(tfb, bijector_name)
    return ctor(validate_args=True, **bijector_params)
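
# A minimal usage sketch (names are ours): draw a concrete bijector from the
# strategy and wrap it, mirroring how the 'Invert' branch above consumes the
# strategy recursively inside @hps.composite.
@hps.composite
def inverted_power_transforms(draw):
    underlying = draw(bijectors(bijector_name='PowerTransform', batch_shape=()))
    return tfb.Invert(underlying, validate_args=True)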
standard_types = [
     lists(max_size=0), tuples(), sets(max_size=0), frozensets(max_size=0),
     fixed_dictionaries({}),
     abc(booleans(), booleans(), booleans()),
     abc(booleans(), booleans(), integers()),
     fixed_dictionaries({'a': integers(), 'b': booleans()}),
     dictionaries(booleans(), integers()),
     dictionaries(text(), booleans()),
     one_of(integers(), tuples(booleans())),
     sampled_from(range(10)),
     one_of(just('a'), just('b'), just('c')),
     sampled_from(('a', 'b', 'c')),
     integers(),
     integers(min_value=3),
     integers(min_value=(-2 ** 32), max_value=(2 ** 64)),
     floats(), floats(min_value=-2.0, max_value=3.0),
     floats(), floats(min_value=-2.0),
     floats(), floats(max_value=-0.0),
     floats(), floats(min_value=0.0),
     floats(min_value=3.14, max_value=3.14),
     text(), binary(),
     booleans(),
     tuples(booleans(), booleans()),
     frozensets(integers()),
     sets(frozensets(booleans())),
     complex_numbers(),
     fractions(),
     decimals(),
     lists(lists(booleans(), average_size=10), average_size=10),
     lists(lists(booleans(), average_size=100)),
     lists(floats(0.0, 0.0), average_size=1.0),
class TestActivations(serial.SerializedTestCase):
    @serial.given(X=hu.tensor(), in_place=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]), **mu.gcs)
    def test_relu(self, X, in_place, engine, gc, dc):
        if gc == mu.mkl_do:
            in_place = False

        op = core.CreateOperator(
            "Relu",
            ["X"],
            ["X"] if in_place else ["Y"],
            engine=engine,
        )

        def relu_ref(X):
            return [np.maximum(X, 0.0)]

        # go away from the origin point to avoid kink problems
        X += 0.02 * np.sign(X)
        X[X == 0.0] += 0.02

        self.assertReferenceChecks(gc, op, [X], relu_ref)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0])

    @unittest.skipIf(not workspace.has_gpu_support and
                    not workspace.has_hip_support,
                     "Relu for float16 can only run on GPU now.")
    @given(X=hu.tensor(dtype=np.float16), in_place=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]), **hu.gcs)
    def test_relu_fp16(self, X, in_place, engine, gc, dc):
        # fp16 is only supported on CUDA/HIP
        assume(core.IsGPUDeviceType(gc.device_type))
        op = core.CreateOperator(
            "Relu",
            ["X"],
            ["X"] if in_place else ["Y"],
            engine=engine,
        )

        def relu_ref(X):
            return [np.maximum(X, 0.0)]

        def relu_grad_ref(g_out, outputs, fwd_inputs):
            dY = g_out
            [Y] = outputs
            dX = dY
            dX[Y == 0] = 0
            return [dX]

        # go away from the origin point to avoid kink problems
        X += 0.02 * np.sign(X)
        X[X == 0.0] += 0.02

        self.assertReferenceChecks(
            gc,
            op,
            [X],
            relu_ref,
            output_to_grad="X" if in_place else "Y",
            grad_reference=relu_grad_ref)

    @serial.given(X=hu.tensor(elements=st.floats(-3.0, 3.0)),
           n=st.floats(min_value=0.5, max_value=2.0),
           in_place=st.booleans(), **hu.gcs)
    def test_relu_n(self, X, n, in_place, gc, dc):
        op = core.CreateOperator(
            "ReluN",
            ["X"],
            ["X"] if in_place else ["Y"],
            n=n,
        )

        def relu_n_ref(X):
            return [np.minimum(np.maximum(X, 0.0), n)]

        # go away from 0 and n to avoid kink problems
        X += 0.04 * np.sign(X)
        X[X == 0.0] += 0.04
        X -= n
        X += 0.02 * np.sign(X)
        X[X == 0.0] -= 0.02
        X += n

        self.assertReferenceChecks(gc, op, [X], relu_n_ref)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0], stepsize=0.005)

    @serial.given(X=hu.tensor(),
           alpha=st.floats(min_value=0.1, max_value=2.0),
           in_place=st.booleans(), engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    def test_elu(self, X, alpha, in_place, engine, gc, dc):
        op = core.CreateOperator(
            "Elu",
            ["X"],
            ["X"] if in_place else ["Y"],
            alpha=alpha,
            engine=engine,
        )

        def elu_ref(X):
            Y = X.copy()  # copy so the reference does not mutate the test input
            Y[X < 0] = alpha * (np.exp(X[X < 0]) - 1.0)
            return [Y]

        # go away from the origin point to avoid kink problems
        X += 0.04 * np.sign(X)
        X[X == 0.0] += 0.04

        self.assertReferenceChecks(gc, op, [X], elu_ref)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0], stepsize=1e-2)

    @given(X=hu.tensor(min_dim=4, max_dim=4),
           alpha=st.floats(min_value=0.1, max_value=2.0),
           inplace=st.booleans(),
           shared=st.booleans(),
           order=st.sampled_from(["NCHW", "NHWC"]),
           seed=st.sampled_from([20, 100]),
           **hu.gcs)
    def test_prelu(self, X, alpha, inplace, shared, order, seed, gc, dc):
        np.random.seed(seed)
        W = np.random.randn(
            X.shape[1] if order == "NCHW" else X.shape[3]).astype(np.float32)

        if shared:
            W = np.random.randn(1).astype(np.float32)

        # go away from the origin point to avoid kink problems
        X += 0.04 * np.sign(X)
        X[X == 0.0] += 0.04

        def prelu_ref(X, W):
            Y = X.copy()
            W = W.reshape(1, -1, 1, 1) if order == "NCHW" \
                else W.reshape(1, 1, 1, -1)
            assert len(X.shape) == 4
            neg_indices = X <= 0
            assert len(neg_indices.shape) == 4
            assert X.shape == neg_indices.shape
            Y[neg_indices] = (Y * W)[neg_indices]
            return (Y,)

        op = core.CreateOperator(
            "PRelu", ["X", "W"], ["Y" if not inplace else "X"],
            alpha=alpha, order=order)
        self.assertReferenceChecks(gc, op, [X, W], prelu_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X, W], [0])

        if not inplace:
            # Gradient check wrt X
            self.assertGradientChecks(gc, op, [X, W], 0, [0], stepsize=1e-2)
            # Gradient check wrt W
            self.assertGradientChecks(gc, op, [X, W], 1, [0], stepsize=1e-2)

    @serial.given(X=hu.tensor(),
           alpha=st.floats(min_value=0.1, max_value=2.0),
           inplace=st.booleans(),
           **hu.gcs)
    def test_leaky_relu(self, X, alpha, inplace, gc, dc):
        # go away from the origin point to avoid kink problems
        X += 0.04 * np.sign(X)
        X[X == 0.0] += 0.04

        def leaky_relu_ref(X):
            Y = X.copy()
            neg_indices = X <= 0
            Y[neg_indices] = Y[neg_indices] * alpha
            return (Y,)

        op = core.CreateOperator(
            "LeakyRelu",
            ["X"], ["Y" if not inplace else "X"],
            alpha=alpha)
        self.assertReferenceChecks(gc, op, [X], leaky_relu_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X], [0])

    @given(X=hu.tensor(),
           inplace=st.booleans(),
           **hu.gcs)
    def test_leaky_relu_default(self, X, inplace, gc, dc):
        # go away from the origin point to avoid kink problems
        X += 0.04 * np.sign(X)
        X[X == 0.0] += 0.04

        def leaky_relu_ref(X):
            Y = X.copy()
            neg_indices = X <= 0
            Y[neg_indices] = Y[neg_indices] * 0.01
            return (Y,)

        op = core.CreateOperator(
            "LeakyRelu",
            ["X"], ["Y" if not inplace else "X"])
        self.assertReferenceChecks(gc, op, [X], leaky_relu_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X], [0])
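
# Why the tests repeatedly nudge X away from 0: a numeric gradient check uses
# finite differences, which are unreliable at the kink of a piecewise-linear
# activation. A standalone illustration (the helper name is ours):
import numpy as np

def central_diff(f, x, h=1e-2):
    return (f(x + h) - f(x - h)) / (2 * h)

relu = lambda x: np.maximum(x, 0.0)
print(central_diff(relu, 0.0))   # 0.5 -- matches neither one-sided slope
print(central_diff(relu, 0.04))  # 1.0 -- well-defined away from the kink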
Example #51
0
class TestMatch(unittest.TestCase):
    @given(turns=integers(min_value=1, max_value=200), game=games())
    @example(turns=5, game=axl.DefaultGame)
    def test_init(self, turns, game):
        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match = axl.Match((p1, p2), turns, game=game)
        self.assertEqual(match.result, [])
        self.assertEqual(match.players, [p1, p2])
        self.assertEqual(match.turns, turns)
        self.assertEqual(match.prob_end, 0)
        self.assertEqual(match.noise, 0)
        self.assertEqual(match.game.RPST(), game.RPST())

        self.assertEqual(match.players[0].match_attributes["length"], turns)
        self.assertEqual(match._cache, {})

    @given(prob_end=floats(min_value=0, max_value=1), game=games())
    def test_init_with_prob_end(self, prob_end, game):
        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match = axl.Match((p1, p2), prob_end=prob_end, game=game)
        self.assertEqual(match.result, [])
        self.assertEqual(match.players, [p1, p2])
        self.assertEqual(match.turns, float("inf"))
        self.assertEqual(match.prob_end, prob_end)
        self.assertEqual(match.noise, 0)
        self.assertEqual(match.game.RPST(), game.RPST())

        self.assertEqual(match.players[0].match_attributes["length"],
                         float("inf"))
        self.assertEqual(match._cache, {})

    @given(
        prob_end=floats(min_value=0, max_value=1),
        turns=integers(min_value=1, max_value=200),
        game=games(),
    )
    def test_init_with_prob_end_and_turns(self, turns, prob_end, game):
        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match = axl.Match((p1, p2), turns=turns, prob_end=prob_end, game=game)
        self.assertEqual(match.result, [])
        self.assertEqual(match.players, [p1, p2])
        self.assertEqual(match.turns, turns)
        self.assertEqual(match.prob_end, prob_end)
        self.assertEqual(match.noise, 0)
        self.assertEqual(match.game.RPST(), game.RPST())

        self.assertEqual(match.players[0].match_attributes["length"],
                         float("inf"))
        self.assertEqual(match._cache, {})

    def test_default_init(self):
        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match = axl.Match((p1, p2))
        self.assertEqual(match.result, [])
        self.assertEqual(match.players, [p1, p2])
        self.assertEqual(match.turns, axl.DEFAULT_TURNS)
        self.assertEqual(match.prob_end, 0)
        self.assertEqual(match.noise, 0)
        self.assertEqual(match.game.RPST(), (3, 1, 0, 5))

        self.assertEqual(match.players[0].match_attributes["length"],
                         axl.DEFAULT_TURNS)
        self.assertEqual(match._cache, {})

    def test_example_prob_end(self):
        """
        Test that matches have different lengths and that the cache has
        recorded the outcomes.
        """
        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match = axl.Match((p1, p2), prob_end=0.5)
        expected_lengths = [3, 1, 5]
        for seed, expected_length in zip(range(3), expected_lengths):
            axl.seed(seed)
            self.assertEqual(match.players[0].match_attributes["length"],
                             float("inf"))
            self.assertEqual(len(match.play()), expected_length)
            self.assertEqual(match.noise, 0)
            self.assertEqual(match.game.RPST(), (3, 1, 0, 5))
        self.assertEqual(len(match._cache), 1)
        self.assertEqual(match._cache[(p1, p2)], [(C, C)] * 5)

    @given(turns=integers(min_value=1, max_value=200), game=games())
    @example(turns=5, game=axl.DefaultGame)
    def test_non_default_attributes(self, turns, game):
        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match_attributes = {"length": 500, "game": game, "noise": 0.5}
        match = axl.Match((p1, p2),
                          turns,
                          game=game,
                          match_attributes=match_attributes)
        self.assertEqual(match.players[0].match_attributes["length"], 500)
        self.assertEqual(match.players[0].match_attributes["noise"], 0.5)

    @given(turns=integers(min_value=1, max_value=200))
    @example(turns=5)
    def test_len(self, turns):
        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match = axl.Match((p1, p2), turns)
        self.assertEqual(len(match), turns)

    def test_len_error(self):
        """
        Length is not defined if it is infinite.
        """
        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match = axl.Match((p1, p2), prob_end=0.5)
        with self.assertRaises(TypeError):
            len(match)

    @given(p=floats(min_value=0, max_value=1))
    def test_stochastic(self, p):

        assume(0 < p < 1)

        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match = axl.Match((p1, p2), 5)
        self.assertFalse(match._stochastic)

        match = axl.Match((p1, p2), 5, noise=p)
        self.assertTrue(match._stochastic)

        p1 = axl.Random()
        match = axl.Match((p1, p2), 5)
        self.assertTrue(match._stochastic)

    @given(p=floats(min_value=0, max_value=1))
    def test_cache_update_required(self, p):

        assume(0 < p < 1)

        p1, p2 = axl.Cooperator(), axl.Cooperator()
        match = axl.Match((p1, p2), 5, noise=p)
        self.assertFalse(match._cache_update_required)

        cache = DeterministicCache()
        cache.mutable = False
        match = axl.Match((p1, p2), 5, deterministic_cache=cache)
        self.assertFalse(match._cache_update_required)

        match = axl.Match((p1, p2), 5)
        self.assertTrue(match._cache_update_required)

        p1 = axl.Random()
        match = axl.Match((p1, p2), 5)
        self.assertFalse(match._cache_update_required)

    def test_play(self):
        cache = DeterministicCache()
        players = (axl.Cooperator(), axl.Defector())
        match = axl.Match(players, 3, deterministic_cache=cache)
        expected_result = [(C, D), (C, D), (C, D)]
        self.assertEqual(match.play(), expected_result)
        self.assertEqual(cache[(axl.Cooperator(), axl.Defector())],
                         expected_result)

        # a deliberately incorrect result so we can tell it came from the cache
        expected_result = [(C, C), (D, D), (D, C), (C, C), (C, D)]
        cache[(axl.Cooperator(), axl.Defector())] = expected_result
        match = axl.Match(players, 3, deterministic_cache=cache)
        self.assertEqual(match.play(), expected_result[:3])

    def test_cache_grows(self):
        """
        We want to make sure that if we try to use the cache for more turns than
        what is stored, then it will instead regenerate the result and overwrite
        the cache.
        """
        cache = DeterministicCache()
        players = (axl.Cooperator(), axl.Defector())
        match = axl.Match(players, 3, deterministic_cache=cache)
        expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)]
        expected_result_3_turn = [(C, D), (C, D), (C, D)]
        self.assertEqual(match.play(), expected_result_3_turn)
        match.turns = 5
        self.assertEqual(match.play(), expected_result_5_turn)
        # The cache should now hold the 5-turn result.
        self.assertEqual(cache[(axl.Cooperator(), axl.Defector())],
                         expected_result_5_turn)

    def test_cache_doesnt_shrink(self):
        """
        We want to make sure that when we access the cache looking for fewer
        turns than what is stored, then it will not overwrite the cache with the
        shorter result.
        """
        cache = DeterministicCache()
        players = (axl.Cooperator(), axl.Defector())
        match = axl.Match(players, 5, deterministic_cache=cache)
        expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)]
        expected_result_3_turn = [(C, D), (C, D), (C, D)]
        self.assertEqual(match.play(), expected_result_5_turn)
        match.turns = 3
        self.assertEqual(match.play(), expected_result_3_turn)
        # The cache should still hold the 5-turn result.
        self.assertEqual(cache[(axl.Cooperator(), axl.Defector())],
                         expected_result_5_turn)

    def test_scores(self):
        player1 = axl.TitForTat()
        player2 = axl.Defector()
        match = axl.Match((player1, player2), 3)
        self.assertEqual(match.scores(), [])
        match.play()
        self.assertEqual(match.scores(), [(0, 5), (1, 1), (1, 1)])

    def test_final_score(self):
        player1 = axl.TitForTat()
        player2 = axl.Defector()

        match = axl.Match((player1, player2), 3)
        self.assertEqual(match.final_score(), None)
        match.play()
        self.assertEqual(match.final_score(), (2, 7))

        match = axl.Match((player2, player1), 3)
        self.assertEqual(match.final_score(), None)
        match.play()
        self.assertEqual(match.final_score(), (7, 2))

    def test_final_score_per_turn(self):
        turns = 3
        player1 = axl.TitForTat()
        player2 = axl.Defector()

        match = axl.Match((player1, player2), turns)
        self.assertEqual(match.final_score_per_turn(), None)
        match.play()
        self.assertEqual(match.final_score_per_turn(), (2 / turns, 7 / turns))

        match = axl.Match((player2, player1), turns)
        self.assertEqual(match.final_score_per_turn(), None)
        match.play()
        self.assertEqual(match.final_score_per_turn(), (7 / turns, 2 / turns))

    def test_winner(self):
        player1 = axl.TitForTat()
        player2 = axl.Defector()

        match = axl.Match((player1, player2), 3)
        self.assertEqual(match.winner(), None)
        match.play()
        self.assertEqual(match.winner(), player2)

        match = axl.Match((player2, player1), 3)
        self.assertEqual(match.winner(), None)
        match.play()
        self.assertEqual(match.winner(), player2)

        player1 = axl.Defector()
        match = axl.Match((player1, player2), 3)
        self.assertEqual(match.winner(), None)
        match.play()
        self.assertEqual(match.winner(), False)

    def test_cooperation(self):
        turns = 3
        player1 = axl.Cooperator()
        player2 = axl.Alternator()

        match = axl.Match((player1, player2), turns)
        self.assertEqual(match.cooperation(), None)
        match.play()
        self.assertEqual(match.cooperation(), (3, 2))

        player1 = axl.Alternator()
        player2 = axl.Defector()

        match = axl.Match((player1, player2), turns)
        self.assertEqual(match.cooperation(), None)
        match.play()
        self.assertEqual(match.cooperation(), (2, 0))

    def test_normalised_cooperation(self):
        turns = 3
        player1 = axl.Cooperator()
        player2 = axl.Alternator()

        match = axl.Match((player1, player2), turns)
        self.assertEqual(match.normalised_cooperation(), None)
        match.play()
        self.assertEqual(match.normalised_cooperation(),
                         (3 / turns, 2 / turns))

        player1 = axl.Alternator()
        player2 = axl.Defector()

        match = axl.Match((player1, player2), turns)
        self.assertEqual(match.normalised_cooperation(), None)
        match.play()
        self.assertEqual(match.normalised_cooperation(),
                         (2 / turns, 0 / turns))

    def test_state_distribution(self):
        turns = 3
        player1 = axl.Cooperator()
        player2 = axl.Alternator()

        match = axl.Match((player1, player2), turns)
        self.assertEqual(match.state_distribution(), None)

        match.play()
        expected = Counter({(C, C): 2, (C, D): 1})
        self.assertEqual(match.state_distribution(), expected)

        player1 = axl.Alternator()
        player2 = axl.Defector()

        match = axl.Match((player1, player2), turns)
        self.assertEqual(match.state_distribution(), None)

        match.play()
        expected = Counter({(C, D): 2, (D, D): 1})
        self.assertEqual(match.state_distribution(), expected)

    def test_normalised_state_distribution(self):
        turns = 3
        player1 = axl.Cooperator()
        player2 = axl.Alternator()

        match = axl.Match((player1, player2), turns)
        self.assertEqual(match.normalised_state_distribution(), None)

        match.play()
        expected = Counter({(C, C): 2 / turns, (C, D): 1 / turns})
        self.assertEqual(match.normalised_state_distribution(), expected)

        player1 = axl.Alternator()
        player2 = axl.Defector()

        match = axl.Match((player1, player2), turns)
        self.assertEqual(match.normalised_state_distribution(), None)

        match.play()
        expected = Counter({(C, D): 2 / turns, (D, D): 1 / turns})
        self.assertEqual(match.normalised_state_distribution(), expected)

    def test_sparklines(self):
        players = (axl.Cooperator(), axl.Alternator())
        match = axl.Match(players, 4)
        match.play()
        expected_sparklines = "████\n█ █ "
        self.assertEqual(match.sparklines(), expected_sparklines)
        expected_sparklines = "XXXX\nXYXY"
        self.assertEqual(match.sparklines("X", "Y"), expected_sparklines)
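
# Distilled from the assertions above, the basic Match workflow is
# players -> Match -> play() -> inspect scores. For TitForTat vs Defector
# over 3 turns the tests expect moves [(C, D), (D, D), (D, D)]:
import axelrod as axl

players = (axl.TitForTat(), axl.Defector())
match = axl.Match(players, 3)
match.play()
assert match.final_score() == (2, 7)  # as test_final_score checks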
    def sample_program_configs(self, draw):
        in_shape = draw(
            st.lists(st.integers(min_value=1, max_value=20),
                     min_size=2,
                     max_size=5))
        fill_constant_shape = draw(
            st.lists(st.integers(min_value=1, max_value=20),
                     min_size=2,
                     max_size=5))

        axis = draw(
            st.integers(min_value=-1,
                        max_value=max(len(in_shape),
                                      len(fill_constant_shape))))

        out_shape = []
        assume(
            check_input_shape_available(in_shape_x=in_shape,
                                        in_shape_y=fill_constant_shape,
                                        axis=axis,
                                        out_shape=out_shape))
        assume(out_shape == in_shape)

        threshold = draw(st.floats(min_value=0, max_value=1))
        scale = draw(st.floats(min_value=0.5, max_value=5))
        offset = draw(st.floats(min_value=0, max_value=1))

        hard_swish_op0 = OpConfig(type="hard_swish",
                                  inputs={"X": ["input_data"]},
                                  outputs={"Out": ["hard_swish_output_data"]},
                                  attrs={
                                      "threshold": threshold,
                                      "scale": scale,
                                      "offset": offset
                                  })

        fill_constant_op = OpConfig(
            type="fill_constant",
            inputs={},
            outputs={"Out": ["fill_constant_output_data"]},
            attrs={
                "dtype": 5,
                "shape": fill_constant_shape,
                "value": 1.,
                "force_cpu": False,
                "place_type": -1
            })

        elementwise_mul_op = OpConfig(
            type="elementwise_mul",
            inputs={
                "X": ["hard_swish_output_data"],
                "Y": ["fill_constant_output_data"]
            },
            outputs={"Out": ["elementwise_mul_output_data"]},
            attrs={"axis": axis})

        hard_swish_op1 = OpConfig(
            type="hard_swish",
            inputs={"X": ["elementwise_mul_output_data"]},
            outputs={"Out": ["output_data"]},
            attrs={
                "threshold": threshold,
                "scale": scale,
                "offset": offset
            })

        ops = [
            hard_swish_op0, fill_constant_op, elementwise_mul_op,
            hard_swish_op1
        ]
        program_config = ProgramConfig(
            ops=ops,
            weights={},
            inputs={"input_data": TensorConfig(shape=in_shape)},
            outputs=["output_data"])
        return program_config
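
# For reference, the activation being fused can be sketched in NumPy using the
# threshold/scale/offset attributes drawn above (assuming Paddle's definition
# hard_swish(x) = x * min(max(0, x + offset), threshold) / scale):
import numpy as np

def hard_swish_ref(x, threshold=6.0, scale=6.0, offset=3.0):
    return x * np.clip(x + offset, 0.0, threshold) / scale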
Example #53
0
def ndarrays_of_shape(shape, lo=-1000.0, hi=1000.0):
    return arrays('float32', shape=shape, elements=floats(min_value=lo, max_value=hi))
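
# Typical use of the helper above in a property-based test (the test name and
# the hypothesis import are assumptions about the surrounding file):
from hypothesis import given

@given(ndarrays_of_shape((4, 4)))
def test_square_input_shape(X):
    assert X.shape == (4, 4)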
Example #54
0
""" test primitive space contains, discretizing and sampling """

import hypothesis as hp
import hypothesis.strategies as st
import pytest

from energypy.common.spaces import ContinuousSpace, DiscreteSpace


@hp.given(st.floats(min_value=-100, max_value=0),
          st.floats(min_value=1, max_value=100), st.integers(1, 10))
def test_continuous(low, high, num_discrete):
    space = ContinuousSpace('state', low, high, None)

    assert space.low == low
    assert space.high == high

    assert space.contains(low)
    assert space.contains((low + high) / 2)
    assert space.contains(high)

    with pytest.raises(ValueError):
        space.contains(low - 2)
    with pytest.raises(ValueError):
        space.contains(high + 2)

    discrete = space.discretize(num_discrete)
    assert len(discrete) == num_discrete
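
# A sketch of what discretize() plausibly does (not shown in this excerpt):
# an evenly spaced grid over [low, high], which satisfies the length check.
import numpy as np

def discretize_ref(low, high, num_discrete):
    return np.linspace(low, high, num_discrete)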


@hp.given(
    st.integers(min_value=6, max_value=100), )
Example #55
0
"""
Test properties of the coordinate transforms in the field vector module to
test if everything has been correctly implemented.
"""
import numpy as np
import json
from hypothesis import given, settings
from hypothesis.strategies import floats
from hypothesis.strategies import tuples

from qcodes.math.field_vector import FieldVector
from qcodes.utils.helpers import NumpyJSONEncoder

random_coordinates = {
    "cartesian":
    tuples(
        floats(min_value=0, max_value=1),  # x
        floats(min_value=0, max_value=1),  # y
        floats(min_value=0, max_value=1)  # z
    ),
    "spherical":
    tuples(
        floats(min_value=0, max_value=1),  # r
        floats(min_value=0, max_value=180),  # theta
        floats(min_value=0, max_value=180)  # phi
    ),
    "cylindrical":
    tuples(
        floats(min_value=0, max_value=1),  # rho
        floats(min_value=0, max_value=180),  # phi
        floats(min_value=0, max_value=1)  # z
    )
Example #56
0
    data,
)

from cattr import Converter

from . import (
    primitive_strategies,
    seqs_of_primitives,
    lists_of_primitives,
    dicts_of_primitives,
    enums_of_primitives,
)
from ._compat import change_type_param

ints_and_type = tuples(integers(), just(int))
floats_and_type = tuples(floats(allow_nan=False), just(float))
strs_and_type = tuples(text(), just(unicode))
bytes_and_type = tuples(binary(), just(bytes))

primitives_and_type = one_of(ints_and_type, floats_and_type, strs_and_type,
                             bytes_and_type)

mut_set_types = sampled_from([Set, MutableSet])
set_types = one_of(mut_set_types, just(FrozenSet))


def create_generic_type(generic_types, param_type):
    """Create a strategy for generating parameterized generic types."""
    return one_of(
        generic_types,
        generic_types.map(lambda t: t[Any]),
Example #57
0

def assert_allclose_na(a, b):
    """assert_allclose with a broader NA/nan/None definition."""
    if _is_na(a) and _is_na(b):
        pass
    else:
        npt.assert_allclose(a, b)


@pytest.mark.parametrize("op, pandas_op", [(sum_op, pd.Series.sum),
                                           (prod_op, pd.Series.prod)])
@settings(deadline=timedelta(milliseconds=1000))
@given(
    data=st.lists(
        st.one_of(st.floats(max_value=10.0, min_value=-10), st.none())),
    skipna=st.booleans(),
)
def test_reduce_op(data, skipna, op, pandas_op):
    arrow = pa.array(data, type=pa.float64(), from_pandas=True)
    pandas = pd.Series(data, dtype=float)

    assert_allclose_na(op(arrow, skipna), pandas_op(pandas, skipna=skipna))

    # Split in the middle and check whether this still works
    if len(data) > 2:
        arrow = pa.chunked_array([
            pa.array(data[:len(data) // 2],
                     type=pa.float64(),
                     from_pandas=True),
            pa.array(data[len(data) // 2:],
Example #58
0
import numpy as np
import pytest
from hypothesis import given
from hypothesis.extra.numpy import arrays
from hypothesis.strategies import floats
from pytest import approx

from coxeter.shape_classes.circle import Circle


@given(floats(0.1, 1000))
def test_perimeter(r):
    circle = Circle(1)
    circle.radius = r
    assert circle.perimeter == 2 * np.pi * r
    assert circle.circumference == 2 * np.pi * r


@given(floats(0.1, 1000))
def test_area(r):
    circle = Circle(1)
    circle.radius = r
    assert circle.area == np.pi * r**2


@given(floats(0.1, 1000))
def test_set_area(area):
    """Test setting the area."""
    circle = Circle(1)
    circle.area = area
    assert circle.area == approx(area)
def test_caches_floats_sensitively():
    assert st.floats(min_value=0.0) is st.floats(min_value=0.0)
    assert st.floats(min_value=0.0) is not st.floats(min_value=0)
    assert st.floats(min_value=0.0) is not st.floats(min_value=-0.0)
Example #60
0
def _check_style(style: str) -> None:
    if style not in ("pytest", "unittest"):
        raise InvalidArgument(f"Valid styles are 'pytest' or 'unittest', got {style!r}")


# Simple strategies to guess for common argument names - we wouldn't do this in
# builds() where strict correctness is required, but we only use these guesses
# when the alternative is nothing() to force user edits anyway.
#
# This table was constructed manually after skimming through the documentation
# for the builtins and a few stdlib modules.  Future enhancements could be based
# on analysis of type-annotated code to detect arguments which almost always
# take values of a particular type.
_GUESS_STRATEGIES_BY_NAME = (
    (st.text(), ["name", "filename", "fname"]),
    (st.floats(), ["real", "imag"]),
    (st.functions(), ["function", "func", "f"]),
    (st.iterables(st.integers()) | st.iterables(st.text()), ["iterable"]),
)


def _strategy_for(param: inspect.Parameter) -> Union[st.SearchStrategy, InferType]:
    # If our default value is an Enum or a boolean, we assume that any value
    # of that type is acceptable.  Otherwise, we only generate the default.
    if isinstance(param.default, bool):
        return st.booleans()
    if isinstance(param.default, enum.Enum):
        return st.sampled_from(type(param.default))
    if param.default is not inspect.Parameter.empty:
        # Using `st.from_type(type(param.default))` would introduce spurious
        # failures in cases like the `flags` argument to regex functions.