Example #1
 def example_generator(ex: ArgsKwargsType) -> None:
     # validators and examples are defined in the enclosing scope (not shown);
     # any generated (args, kwargs) pair that fails validation is rejected.
     for validator in validators:
         try:
             validator(*ex[0], **ex[1])
         except Exception:
             hypothesis.reject()
     examples.append(ex)
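A self-contained sketch of the same reject-on-failed-validation pattern, with an invented validator so the snippet can run on its own:

import hypothesis
from hypothesis import given, strategies as st

def is_positive(x):
    # Invented validator for illustration: raise on values we treat as invalid.
    if x <= 0:
        raise ValueError("must be positive")

validators = [is_positive]

@given(st.integers())
def test_only_validated_inputs_reach_the_body(x):
    # As in the fragment above, inputs that fail a validator are rejected so
    # Hypothesis discards them instead of reporting a failure.
    for validator in validators:
        try:
            validator(x)
        except Exception:
            hypothesis.reject()
    assert x > 0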
Example #2
def test_arbitrary_data_frames(data):
    columns = data.draw(st.lists(
        column_strategy(),
        unique_by=lambda c: c.name if c.name is not None else float('nan')
    ))

    try:
        df = data.draw(pdst.data_frames(columns))
    except Exception as e:
        if type(e).__name__ == 'OutOfBoundsDatetime':
            # See https://github.com/HypothesisWorks/hypothesis-python/pull/826
            reject()
        else:
            raise
    data_frame_columns = list(df)

    assert len(data_frame_columns) == len(columns)

    for i, (c, n) in enumerate(zip(columns, df)):
        if c.name is None:
            assert n == i
        else:
            assert c.name == n

    for i, c in enumerate(columns):
        column_name = data_frame_columns[i]
        values = df[column_name]
        if c.unique:
            assert len(set(values)) == len(values)
Example #3
def test_parse_duration_input(
    hours,
    minutes,
    seconds,
):
    """Assert time input parsing is correct."""
    wanted_seconds = 0
    input_str = ""
    if hours is None:
        hours = 0
    if minutes is None:
        minutes = 0
    if seconds is None:
        seconds = 0

    input_str += f"{hours}h"
    input_str += f"{minutes}m"
    input_str += f"{seconds}s"

    total_seconds = (60.0 * 60 * hours + 60 * minutes + seconds)

    try:
        assert meditate.parse_duration_input(
            input_str=input_str, ) == total_seconds
    except meditate.DurationConfigurationException as e:
        reject()
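As a concrete instance of the duration arithmetic in the test above (values picked arbitrarily):

# hours=1, minutes=2, seconds=3 builds the input string "1h2m3s" and the
# expected total of 60.0 * 60 * 1 + 60 * 2 + 3 == 3723.0 seconds.
hours, minutes, seconds = 1, 2, 3
input_str = f"{hours}h{minutes}m{seconds}s"                  # "1h2m3s"
total_seconds = 60.0 * 60 * hours + 60 * minutes + seconds   # 3723.0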
Example #4
def test_no_exceptions(
    source_code,
    default_options,
    parse_ast_tree,
    parse_tokens,
):
    """
    This test case is a complex example of magic.

    We use property-based tests to generate Python programs for us,
    and then we ensure that our linter does not crash on arbitrary input.
    """
    try:
        tree = parse_ast_tree(str(source_code.encode('utf-8-sig')))
    except (UnicodeEncodeError, SyntaxError):
        reject()
        raise

    lines = io.StringIO(source_code)
    tokens = list(tokenize.generate_tokens(lambda: next(lines)))

    Checker.parse_options(default_options)
    checker = Checker(tree, tokens)

    for violation in checker.run():
        assert isinstance(violation[0], int)
        assert isinstance(violation[1], int)
        assert violation[2].startswith('WPS'), violation[2]
        assert 'WPS0' not in violation[2]
        assert violation[3] == Checker
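The source_code argument above comes from a fixture outside this snippet; one plausible way to supply arbitrary Python programs for this kind of test is the hypothesmith package (a sketch, not necessarily the project's actual fixture):

import hypothesmith
from hypothesis import given

@given(source_code=hypothesmith.from_grammar())
def test_linter_does_not_crash(source_code):
    # Hypothetical driver: hand the generated program to a body like the one above.
    ...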
Example #5
def test_arbitrary_data_frames(data):
    columns = data.draw(
        st.lists(
            column_strategy(),
            unique_by=lambda c: c.name if c.name is not None else float("nan"),
        )
    )

    try:
        df = data.draw(pdst.data_frames(columns))
    except Exception as e:
        if type(e).__name__ == "OutOfBoundsDatetime":
            # See https://github.com/HypothesisWorks/hypothesis-python/pull/826
            reject()
        else:
            raise
    data_frame_columns = list(df)

    assert len(data_frame_columns) == len(columns)

    for i, (c, n) in enumerate(zip(columns, df)):
        if c.name is None:
            assert n == i
        else:
            assert c.name == n

    for i, c in enumerate(columns):
        column_name = data_frame_columns[i]
        values = df[column_name]
        if c.unique:
            assert len(set(values)) == len(values)
Example #6
    def test_compression_is_invertible(self, string):
        # not ready for unicode
        if any(ord(x) > 255 or ord(x) & 0x80 for x in string):
            reject()

        # can't have null terminated string
        if any(ord(x) == 0x00 for x in string):
            reject()

        assume(len(string) > 0 and len(string) < 100)
        length = len(string)
        original = (ctypes.c_uint8 * length)()
        out = (ctypes.c_uint8 * length)()
        for i, x in enumerate(string):
            original[i] = ord(x)
        compressor = Compressor(compressor_successor_table, packs)
        decompressor = Decompressor(decompressor_successor_table, packs, min_char)
        out_length = compressor.compress(original, length, out, length)

        assert out_length <= length

        out2 = (ctypes.c_uint8 * length)()
        out2_length = decompressor.decompress(out, out_length, out2, length)

        final = ""
        for i in range(out2_length):
            final += chr(out2[i])

        assert out2_length == length
        assert final == string
Example #7
def test_convert_python_file_to_module_qualname(project_root_path,
                                                python_file):
    #try:
    if any(x in project_root_path
           for x in ("+", ")", "(", "[", "]", "-", "*", "$", "?")):
        reject()
    convert_python_file_to_module_qualname(project_root_path, python_file)
Example #8
def test_fuzz_hanging_indent_with_parentheses(
    statement,
    imports,
    white_space,
    indent,
    line_length,
    comments,
    line_separator,
    comment_prefix,
    include_trailing_comma,
    remove_comments,
):
    try:
        isort.wrap_modes.hanging_indent_with_parentheses(
            statement=statement,
            imports=imports,
            white_space=white_space,
            indent=indent,
            line_length=line_length,
            comments=comments,
            line_separator=line_separator,
            comment_prefix=comment_prefix,
            include_trailing_comma=include_trailing_comma,
            remove_comments=remove_comments,
        )
    except ValueError:
        reject()
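The same fuzz-and-reject pattern, applied to a standard-library function so the sketch is self-contained (the target and its argument ranges are invented for illustration):

import datetime
from hypothesis import given, reject, strategies as st

@given(
    year=st.integers(0, 10000),
    month=st.integers(0, 13),
    day=st.integers(0, 32),
)
def test_fuzz_date_constructor(year, month, day):
    # Call the target and reject inputs it refuses itself, so that only
    # unexpected exception types would fail the test.
    try:
        datetime.date(year, month, day)
    except ValueError:
        reject()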
Example #9
def test_reusable_strategies_are_all_reusable(s):
    try:
        s.validate()
    except InvalidArgument:
        reject()

    assert s.has_reusable_values
Example #10
def test_reusable_strategies_are_all_reusable(s):
    try:
        s.validate()
    except InvalidArgument:
        reject()

    assert s.has_reusable_values
Example #11
 def reject_invalid_code(source_code: str, mode: str) -> None:
     """Input validation helper shared by modules, statements, and expressions."""
     # We start by compiling our source code to byte code, and rejecting inputs
     # where this fails.  This is to guard against spurious failures due to
     # e.g. `eval` only being a keyword in Python 3.7
     assert mode in {"eval", "exec", "single"}
     hypothesis.note(source_code)
     try:
         compile(source_code, "<string>", mode)
     except Exception:
         # We're going to check here that libCST also rejects this string.
         # If libCST parses it's a test failure; if not we reject this input
         # so Hypothesis spends as little time as possible exploring invalid
         # code. (usually I'd use a custom mutator, but this is free so...)
         parser = dict(
             eval=libcst.parse_expression,
             exec=libcst.parse_module,
             single=libcst.parse_statement,
         )
         try:
             tree = parser[mode](source_code)
             msg = f"libCST parsed a string rejected by compile() into {tree!r}"
             assert False, msg
         except Exception:
             hypothesis.reject()
         assert False, "unreachable"
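A hedged usage sketch for the helper above; the sampled strings are invented, and invalid programs are rejected before the test body runs:

from hypothesis import given, strategies as st

@given(source_code=st.sampled_from(["1 + 1", "print('hi')", "def f(:", "x ="]))
def test_only_compilable_code_reaches_the_body(source_code):
    reject_invalid_code(source_code, mode="exec")
    # After the helper returns, the source is known to compile.
    compile(source_code, "<string>", "exec")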
Example #12
def test_fuzz_vertical_prefix_from_module_import(
    statement,
    imports,
    white_space,
    indent,
    line_length,
    comments,
    line_separator,
    comment_prefix,
    include_trailing_comma,
    remove_comments,
):
    try:
        isort.wrap_modes.vertical_prefix_from_module_import(
            statement=statement,
            imports=imports,
            white_space=white_space,
            indent=indent,
            line_length=line_length,
            comments=comments,
            line_separator=line_separator,
            comment_prefix=comment_prefix,
            include_trailing_comma=include_trailing_comma,
            remove_comments=remove_comments,
        )
    except ValueError:
        reject()
Example #13
def test_fuzz_floats_bounds(data):
    bound = none() | floats(allow_nan=False)
    low, high = data.draw(tuples(bound, bound), label="low, high")
    if low is not None and high is not None and low > high:
        low, high = high, low
    exmin = low is not None and low != inf and data.draw(booleans(),
                                                         label="ex_min")
    exmax = high is not None and high != -inf and data.draw(booleans(),
                                                            label="ex_max")
    try:
        val = data.draw(floats(low, high, exclude_min=exmin,
                               exclude_max=exmax),
                        label="value")
        assume(val)  # positive/negative zero is an issue
    except (InvalidArgument, HypothesisDeprecationWarning):
        assert ((exmin and exmax and low == next_down(high))
                or (low == high and (exmin or exmax))
                or (low == high == 0 and copysign(1.0, low) == 1
                    and copysign(1.0, high) == -1))
        reject()  # no floats in required range
    if low is not None:
        assert low <= val
    if high is not None:
        assert val <= high
    if exmin:
        assert low != val
    if exmax:
        assert high != val
Example #14
 def setup(self, filename):
     mock_process = MagicMock()
     try:
         self.running_bag = prb.BagPlayer(filename)
     except prb.MissingBagError:
         hyp.reject()
     self.running_bag.process = mock_process
Example #15
def test_always_reduces_integers_to_smallest_suitable_sizes(problem):
    n, blob = problem
    blob = hbytes(blob)
    try:
        d = ConjectureData.for_buffer(blob)
        k = d.draw(st.integers())
        stop = blob[len(d.buffer)]
    except (StopTest, IndexError):
        reject()

    assume(k > n)
    assume(stop > 0)

    def f(data):
        k = data.draw(st.integers())
        data.output = repr(k)
        if data.draw_bits(8) == stop and k >= n:
            data.mark_interesting()

    runner = ConjectureRunner(
        f,
        random=Random(0),
        settings=settings(
            suppress_health_check=HealthCheck.all(),
            phases=(Phase.shrink, ),
            database=None,
            verbosity=Verbosity.debug,
        ),
        database_key=None,
    )

    runner.cached_test_function(blob)

    assert runner.interesting_examples

    v, = runner.interesting_examples.values()

    shrinker = runner.new_shrinker(v, lambda x: x.status == Status.INTERESTING)

    shrinker.fixate_shrink_passes(["minimize_individual_blocks"])

    v = shrinker.shrink_target

    m = ConjectureData.for_buffer(v.buffer).draw(st.integers())
    assert m == n

    # Upper bound on the length needed is calculated as follows:
    # * We have an initial byte at the beginning to decide the length of the
    #   integer.
    # * We have a terminal byte as the stop value.
    # * The rest is the integer payload. This should be n. Including the sign
    #   bit, n needs (1 + n.bit_length()) / 8 bytes (rounded up). But we only
    #   have power of two sizes, so it may be up to a factor of two more than
    #   that.
    bits_needed = 1 + n.bit_length()
    actual_bits_needed = min(
        [s for s in WideRangeIntStrategy.sizes if s >= bits_needed])
    bytes_needed = actual_bits_needed // 8
    # 3 extra bytes: two for the sampler, one for the capping value.
    assert len(v.buffer) == 3 + bytes_needed
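A worked instance of the size bound described in the comment above; the exact sizes tuple is an assumption here (WideRangeIntStrategy.sizes is taken to be (8, 16, 32, 64, 128)):

sizes = (8, 16, 32, 64, 128)                  # assumed WideRangeIntStrategy.sizes
n = 1000                                      # example shrunk integer
bits_needed = 1 + n.bit_length()              # sign bit + 10 value bits = 11
actual_bits_needed = min(s for s in sizes if s >= bits_needed)  # 16
bytes_needed = actual_bits_needed // 8        # 2
assert 3 + bytes_needed == 5                  # expected final buffer length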
Example #16
def test_arbitrary_data_frames(data):
    columns = data.draw(
        st.lists(
            column_strategy(),
            unique_by=lambda c: c.name if c.name is not None else float("nan"),
        )
    )

    try:
        # Use raw data to work around pandas bug in repr. See
        # https://github.com/pandas-dev/pandas/issues/27484
        df = data.conjecture_data.draw(pdst.data_frames(columns))
    except Exception as e:
        if type(e).__name__ == "OutOfBoundsDatetime":
            # See https://github.com/HypothesisWorks/hypothesis-python/pull/826
            reject()
        else:
            raise
    data_frame_columns = list(df)

    assert len(data_frame_columns) == len(columns)

    for i, (c, n) in enumerate(zip(columns, df)):
        if c.name is None:
            assert n == i
        else:
            assert c.name == n

    for i, c in enumerate(columns):
        column_name = data_frame_columns[i]
        values = df[column_name]
        if c.unique:
            assert len(set(values)) == len(values)
Example #17
def test_reusable_strategies_are_all_reusable(s):
    try:
        s.validate()
    except (InvalidArgument, HypothesisDeprecationWarning):
        reject()

    assert s.has_reusable_values
Example #18
def test_minmax_magnitude_equal(data, mag):
    val = data.draw(st.complex_numbers(min_magnitude=mag, max_magnitude=mag))
    try:
        assert math.isclose(abs(val), mag)
    except OverflowError:
        reject()
    except AttributeError:
        pass  # Python 2.7.3 does not have math.isclose
Example #19
def test_minmax_magnitude_equal(data, mag):
    val = data.draw(st.complex_numbers(min_magnitude=mag, max_magnitude=mag))
    try:
        # Cap magnitude at 10e300 to avoid float overflow, and imprecision
        # at very large exponents (which makes math.isclose fail)
        assert math.isclose(abs(val), mag)
    except OverflowError:
        reject()
Example #20
def arrays(draw, type, size=None):
    if isinstance(type, st.SearchStrategy):
        type = draw(type)
    elif not isinstance(type, pa.DataType):
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    shape = (size, )

    if pa.types.is_list(type):
        offsets = draw(npst.arrays(np.uint8(), shape=shape)).cumsum() // 20
        offsets = np.insert(offsets, 0, 0, axis=0)  # prepend with zero
        values = draw(arrays(type.value_type, size=int(offsets.sum())))
        return pa.ListArray.from_arrays(offsets, values)

    if pa.types.is_struct(type):
        h.assume(len(type) > 0)
        names, child_arrays = [], []
        for field in type:
            names.append(field.name)
            child_arrays.append(draw(arrays(field.type, size=size)))
        # fields' metadata is lost here, because from_arrays doesn't accept
        # a fields argument, only names
        return pa.StructArray.from_arrays(child_arrays, names=names)

    if (pa.types.is_boolean(type) or pa.types.is_integer(type)
            or pa.types.is_floating(type)):
        values = npst.arrays(type.to_pandas_dtype(), shape=(size, ))
        return pa.array(draw(values), type=type)

    if pa.types.is_null(type):
        value = st.none()
    elif pa.types.is_time(type):
        value = st.times()
    elif pa.types.is_date(type):
        value = st.dates()
    elif pa.types.is_timestamp(type):
        tz = pytz.timezone(type.tz) if type.tz is not None else None
        value = st.datetimes(timezones=st.just(tz))
    elif pa.types.is_binary(type):
        value = st.binary()
    elif pa.types.is_string(type):
        value = st.text()
    elif pa.types.is_decimal(type):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    else:
        raise NotImplementedError(type)

    values = st.lists(value, min_size=size, max_size=size)
    return pa.array(draw(values), type=type)
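A hedged usage sketch for the strategy above (it assumes arrays is decorated with @st.composite, which its draw parameter implies):

import hypothesis as h
import pyarrow as pa

@h.given(arr=arrays(pa.int64(), size=5))
def test_draws_an_int64_array_of_the_requested_size(arr):
    # The drawn value should be a pyarrow array of the requested type and length.
    assert arr.type == pa.int64()
    assert len(arr) == 5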
Example #21
def test_minmax_magnitude_equal(data, mag):
    val = data.draw(st.complex_numbers(min_magnitude=mag, max_magnitude=mag))
    try:
        assume(abs(val) < float('inf'))
        assert math.isclose(abs(val), mag)
    except OverflowError:
        reject()
    except AttributeError:
        pass  # Python 2.7.3 does not have math.isclose
Example #22
 def test_enter_returns_self(self, filenames):
     with patch.object(prb, "time", autospec=True):
         try:
             raw_bag = prb.Bag(filenames)
             with prb.Bag(filenames) as context_bag:
                 assert context_bag.filenames == raw_bag.filenames
                 assert context_bag.process == raw_bag.process
         except prb.MissingBagError:
             hyp.reject()
Example #23
 def test_builtin_works_on_many_examples(source_code):
     try:
         source = source_code.encode('utf-8-sig')
     except UnicodeEncodeError:
         reject()
         raise
     tree = ast.parse(source)
     checker = BuiltinsChecker(tree, '/home/script.py')
     assert isinstance([c for c in checker.run()], list)
Example #24
def test_always_reduces_integers_to_smallest_suitable_sizes(problem):
    n, blob = problem
    blob = hbytes(blob)
    try:
        d = ConjectureData.for_buffer(blob)
        k = d.draw(st.integers())
        stop = blob[len(d.buffer)]
    except (StopTest, IndexError):
        reject()

    assume(k > n)
    assume(stop > 0)

    def f(data):
        k = data.draw(st.integers())
        data.output = repr(k)
        if data.draw_bits(8) == stop and k >= n:
            data.mark_interesting()

    runner = ConjectureRunner(f, random=Random(0), settings=settings(
        suppress_health_check=HealthCheck.all(), timeout=unlimited,
        phases=(Phase.shrink,), database=None, verbosity=Verbosity.debug
    ), database_key=None)

    runner.test_function(ConjectureData.for_buffer(blob))

    assert runner.interesting_examples

    v, = runner.interesting_examples.values()

    shrinker = runner.new_shrinker(v, lambda x: x.status == Status.INTERESTING)

    shrinker.clear_passes()
    shrinker.add_new_pass('minimize_individual_blocks')

    shrinker.shrink()

    v = shrinker.shrink_target

    m = ConjectureData.for_buffer(v.buffer).draw(st.integers())
    assert m == n

    # Upper bound on the length needed is calculated as follows:
    # * We have an initial byte at the beginning to decide the length of the
    #   integer.
    # * We have a terminal byte as the stop value.
    # * The rest is the integer payload. This should be n. Including the sign
    #   bit, n needs (1 + n.bit_length()) / 8 bytes (rounded up). But we only
    #   have power of two sizes, so it may be up to a factor of two more than
    #   that.
    bits_needed = 1 + n.bit_length()
    actual_bits_needed = min(
        [s for s in WideRangeIntStrategy.sizes if s >= bits_needed])
    bytes_needed = actual_bits_needed // 8
    # 3 extra bytes: two for the sampler, one for the capping value.
    assert len(v.buffer) == 3 + bytes_needed
Example #25
def test_fuzz_show_unified_diff(file_input, file_output, file_path, output):
    try:
        isort.format.show_unified_diff(
            file_input=file_input,
            file_output=file_output,
            file_path=file_path,
            output=output,
        )
    except UnicodeEncodeError:
        reject()
Example #26
def test_decoding_may_fail(t):
    try:
        decode_failure(t)
        reject()
    except UnsatisfiedAssumption:
        raise  # don't silence the reject()
    except InvalidArgument:
        pass
    except Exception as e:
        assert False, 'decoding failed with %r, not InvalidArgument' % (e,)
Example #27
def test_decoding_may_fail(t):
    try:
        decode_failure(t)
        reject()
    except UnsatisfiedAssumption:
        raise  # don't silence the reject()
    except InvalidArgument:
        pass
    except Exception as e:
        raise AssertionError("Expected an InvalidArgument exception") from e
Example #28
def test_fuzz_add_to_line(comments, original_string, removed, comment_prefix):
    try:
        isort.comments.add_to_line(
            comments=comments,
            original_string=original_string,
            removed=removed,
            comment_prefix=comment_prefix,
        )
    except ValueError:
        reject()
Example #29
def test_generated_data_matches_schema(schema_strategy, data):
    """Check that an object drawn from an arbitrary schema is valid."""
    schema = data.draw(schema_strategy)
    try:
        value = data.draw(from_schema(schema), "value from schema")
    except InvalidArgument:
        reject()
    jsonschema.validate(value, schema)
    # This checks that our canonicalisation is semantically equivalent.
    jsonschema.validate(value, canonicalish(schema))
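A minimal illustration of the validation step above, with a hand-written schema instead of a drawn one:

import jsonschema

schema = {"type": "integer", "minimum": 0}
jsonschema.validate(3, schema)        # conforming instances validate silently
try:
    jsonschema.validate(-1, schema)
except jsonschema.ValidationError:
    pass                              # non-conforming instances raise ValidationError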
Example #30
def test_decoding_may_fail(t):
    try:
        decode_failure(t)
        reject()
    except UnsatisfiedAssumption:
        raise  # don't silence the reject()
    except InvalidArgument:
        pass
    except Exception as e:
        assert False, "decoding failed with %r, not InvalidArgument" % (e, )
Example #31
def cant_serialize(media_type: str) -> NoReturn:  # type: ignore
    """Reject the current example if we don't know how to send this data to the application."""
    event_text = f"Can't serialize data to `{media_type}`."
    note(
        f"{event_text}. "
        f"You can register your own serializer with `schemathesis.serializers.register` and Schemathesis will be able "
        f"to make API calls with this media type."
    )
    event(event_text)
    reject()  # type: ignore
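A self-contained sketch of the same note-then-reject pattern; the media types and the KNOWN_MEDIA_TYPES registry are invented for illustration:

from hypothesis import event, given, note, reject, strategies as st

KNOWN_MEDIA_TYPES = {"application/json", "text/plain"}

@given(media_type=st.sampled_from(["application/json", "text/plain", "image/png"]))
def test_unknown_media_types_are_rejected(media_type):
    if media_type not in KNOWN_MEDIA_TYPES:
        # Record why the example was discarded, then reject it.
        note(f"Can't serialize data to `{media_type}`.")
        event(f"Can't serialize data to `{media_type}`.")
        reject()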
Example #32
def test_minmax_magnitude_equal(data, mag):
    val = data.draw(st.complex_numbers(min_magnitude=mag, max_magnitude=mag))
    try:
        # Cap magnitude at 10e300 to avoid float overflow, and imprecision
        # at very large exponents (which makes math.isclose fail)
        assert math.isclose(abs(val), mag)
    except OverflowError:
        reject()
    except AttributeError:
        pass  # Python 2.7.3 does not have math.isclose
Example #33
def test_query_hypothesis_with_real_url(query):
    print("running test_query_hyphotesis_for_query_api()")
    try:
        response = query_api(query)
        if len(response) > 0:
            assert response == 'http://api.skywatch.co/?types=star&vmag=gt0'
        else:
            assert response == ""
    except Exception as e:
        reject()
Example #34
 def test(x):
     try:
         i = next(testit)
     except StopIteration:
         return
     if i == 1:
         return
     elif i == 2:
         reject()
     else:
         raise Nope()
Example #35
 def _wrap(self, test_func: F) -> F:
     def wrapper(case: ArgsKwargsType, *args, **kwargs) -> None:
         ex = case
         __tracebackhide__ = True
         for validator in self.validators:
             try:
                 validator(*ex[0], **ex[1])
             except Exception:
                 hypothesis.reject()
         case = self.make_case(*ex[0], **ex[1])
         test_func(case, *args, **kwargs)
Example #36
 def test(x):
     try:
         i = next(testit)
     except StopIteration:
         return
     if i == 1:
         return
     elif i == 2:
         reject()
     else:
         raise Nope()
Example #37
def test_tokenize_round_trip_bytes(source_code):
    try:
        source = source_code.encode("utf-8-sig")
    except UnicodeEncodeError:
        reject()
    tokens = list(tokenize.tokenize(io.BytesIO(source).readline))
    outbytes = tokenize.untokenize(
        tokens)  # may have changed whitespace from source
    output = list(tokenize.tokenize(io.BytesIO(outbytes).readline))
    assert [(t.type, t.string) for t in tokens] == [(t.type, t.string)
                                                    for t in output]
Example #38
def test_fuzz_stuff(data):
    pattern = data.draw(
        st.text(min_size=1, max_size=5) |
        st.binary(min_size=1, max_size=5) |
        CONSERVATIVE_REGEX.filter(bool)
    )

    try:
        regex = re.compile(pattern)
    except re.error:
        reject()

    ex = data.draw(st.from_regex(regex))
    assert regex.search(ex)
Example #39
def test_generate_arbitrary_indices(data):
    min_size = data.draw(st.integers(0, 10), "min_size")
    max_size = data.draw(st.none() | st.integers(min_size, min_size + 10), "max_size")
    unique = data.draw(st.booleans(), "unique")
    dtype = data.draw(npst.scalar_dtypes(), "dtype")
    assume(supported_by_pandas(dtype))

    # Pandas bug: https://github.com/pandas-dev/pandas/pull/14916 until 0.20;
    # then int64 indexes are inferred from uint64 values.
    assume(dtype.kind != "u")

    pass_elements = data.draw(st.booleans(), "pass_elements")

    converted_dtype = pandas.Index([], dtype=dtype).dtype

    try:
        inferred_dtype = pandas.Index([data.draw(npst.from_dtype(dtype))]).dtype

        if pass_elements:
            elements = npst.from_dtype(dtype)
            dtype = None
        else:
            elements = None

        index = data.draw(
            pdst.indexes(
                elements=elements,
                dtype=dtype,
                min_size=min_size,
                max_size=max_size,
                unique=unique,
            )
        )

    except Exception as e:
        if type(e).__name__ == "OutOfBoundsDatetime":
            # See https://github.com/HypothesisWorks/hypothesis-python/pull/826
            reject()
        else:
            raise
    if dtype is None:
        if pandas.__version__ >= "0.19":
            assert index.dtype == inferred_dtype
    else:
        assert index.dtype == converted_dtype

    if unique:
        assert len(set(index.values)) == len(index)
Example #40
def test_floats_are_in_range(x, y, s):
    assume(not (math.isnan(x) or math.isnan(y)))
    assume(not (math.isinf(x) or math.isinf(y)))
    x, y = sorted((x, y))
    assume(x < y)

    @given(floats(x, y))
    @seed(s)
    @settings(max_examples=10)
    def test_is_in_range(t):
        assert x <= t <= y

    try:
        test_is_in_range()
    except Unsatisfiable:
        reject()
Example #41
def test_fuzz_fractions_bounds(data):
    denom = data.draw(none() | integers(1, 100), label="denominator")
    fracs = none() | fractions(max_denominator=denom)
    low, high = data.draw(tuples(fracs, fracs), label="low, high")
    if low is not None and high is not None and low > high:
        low, high = high, low
    try:
        val = data.draw(fractions(low, high, denom), label="value")
    except InvalidArgument:
        reject()  # fractions too close for given max_denominator
    if low is not None:
        assert low <= val
    if high is not None:
        assert val <= high
    if denom is not None:
        assert 1 <= val.denominator <= denom
Example #42
def test_explore_arbitrary_function(strat, data):
    cache = {}

    def predicate(x):
        try:
            return cache[x]
        except KeyError:
            return cache.setdefault(x, data.draw(st.booleans(), label=repr(x)))

    try:
        find(
            strat, predicate,
            settings=settings(
                database=None, verbosity=Verbosity.quiet, max_shrinks=1000)
        )
    except NoSuchExample:
        reject()
Example #43
def test_fuzz_decimals_bounds(data):
    places = data.draw(none() | integers(0, 20), label='places')
    finite_decs = decimals(allow_nan=False, allow_infinity=False,
                           places=places) | none()
    low, high = data.draw(tuples(finite_decs, finite_decs), label='low, high')
    if low is not None and high is not None and low > high:
        low, high = high, low
    ctx = decimal.Context(prec=data.draw(integers(1, 100), label='precision'))
    try:
        with decimal.localcontext(ctx):
            strat = decimals(low, high, allow_nan=False,
                             allow_infinity=False, places=places)
            val = data.draw(strat, label='value')
    except InvalidArgument:
        reject()  # decimals too close for given places
    if low is not None:
        assert low <= val
    if high is not None:
        assert val <= high
    if places is not None:
        assert val.as_tuple().exponent == -places
Example #44
def test_generate_arbitrary_indices(data):
    min_size = data.draw(st.integers(0, 10), 'min_size')
    max_size = data.draw(
        st.none() | st.integers(min_size, min_size + 10), 'max_size')
    unique = data.draw(st.booleans(), 'unique')
    dtype = data.draw(npst.scalar_dtypes(), 'dtype')
    assume(supported_by_pandas(dtype))

    pass_elements = data.draw(st.booleans(), 'pass_elements')

    converted_dtype = pandas.Index([], dtype=dtype).dtype

    try:
        inferred_dtype = pandas.Index(
            [data.draw(npst.from_dtype(dtype))]).dtype

        if pass_elements:
            elements = npst.from_dtype(dtype)
            dtype = None
        else:
            elements = None

        index = data.draw(pdst.indexes(
            elements=elements, dtype=dtype, min_size=min_size,
            max_size=max_size, unique=unique,
        ))

    except Exception as e:
        if type(e).__name__ == 'OutOfBoundsDatetime':
            # See https://github.com/HypothesisWorks/hypothesis-python/pull/826
            reject()
        else:
            raise
    if dtype is None:
        assert index.dtype == inferred_dtype
    else:
        assert index.dtype == converted_dtype

    if unique:
        assert len(set(index.values)) == len(index)
Example #45
def test_load_characters(filename):
    try:
        characters = load_characters(filename)
        assert isinstance(characters, list)
    except IOError:
        reject()
Example #46
 def track(x):
     count[0] += 1
     reject()
Example #47
def reuse_group(draw, group_name):
    cache = draw(GROUP_CACHE_STRATEGY)
    try:
        return cache[group_name]
    except KeyError:
        reject()
Example #48
 def this_has_a_totally_unique_name(x):
     calls[0] += 1
     reject()
Example #49
def _models_impl(draw, strat):
    """Handle the nasty part of drawing a value for models()"""
    try:
        return draw(strat)[0]
    except IntegrityError:
        reject()
Example #50
 def this_has_a_unique_name_and_lives_on_a_class(self, x):
     reject()
Example #51
 def test_assume_false(x):
     reject()
Example #52
def arrays(draw, type, size=None):
    if isinstance(type, st.SearchStrategy):
        type = draw(type)
    elif not isinstance(type, pa.DataType):
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    shape = (size,)

    if pa.types.is_list(type):
        offsets = draw(npst.arrays(np.uint8(), shape=shape)).cumsum() // 20
        offsets = np.insert(offsets, 0, 0, axis=0)  # prepend with zero
        values = draw(arrays(type.value_type, size=int(offsets.sum())))
        return pa.ListArray.from_arrays(offsets, values)

    if pa.types.is_struct(type):
        h.assume(len(type) > 0)
        names, child_arrays = [], []
        for field in type:
            names.append(field.name)
            child_arrays.append(draw(arrays(field.type, size=size)))
        # fields' metadata is lost here, because from_arrays doesn't accept
        # a fields argument, only names
        return pa.StructArray.from_arrays(child_arrays, names=names)

    if (pa.types.is_boolean(type) or pa.types.is_integer(type) or
            pa.types.is_floating(type)):
        values = npst.arrays(type.to_pandas_dtype(), shape=(size,))
        np_arr = draw(values)
        if pa.types.is_floating(type):
            # Workaround ARROW-4952: no easy way to assert array equality
            # in a NaN-tolerant way.
            np_arr[np.isnan(np_arr)] = -42.0
        return pa.array(np_arr, type=type)

    if pa.types.is_null(type):
        value = st.none()
    elif pa.types.is_time(type):
        value = st.times()
    elif pa.types.is_date(type):
        value = st.dates()
    elif pa.types.is_timestamp(type):
        tz = pytz.timezone(type.tz) if type.tz is not None else None
        value = st.datetimes(timezones=st.just(tz))
    elif pa.types.is_binary(type):
        value = st.binary()
    elif pa.types.is_string(type):
        value = st.text()
    elif pa.types.is_decimal(type):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    else:
        raise NotImplementedError(type)

    values = st.lists(value, min_size=size, max_size=size)
    return pa.array(draw(values), type=type)
Example #53
 def test_slow_test_times_out(x):
     time.sleep(0.05)
     reject()
Example #54
 def this_has_a_unique_name_and_lives_on_a_class(self, x):
     calls2[0] += 1
     reject()
Example #55
 def inner(*args, **kwargs):
     try:
         return contract_func(*args, **kwargs)
     except PreconditionError:
         reject()
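A self-contained sketch of the wrapper pattern above; PreconditionError and the decorator name are stand-ins for the contract library's actual API:

import functools
import hypothesis

class PreconditionError(Exception):
    # Stand-in for the contract library's precondition-violation error.
    pass

def reject_failed_preconditions(contract_func):
    # Examples whose arguments violate a precondition are rejected rather
    # than reported as test failures.
    @functools.wraps(contract_func)
    def inner(*args, **kwargs):
        try:
            return contract_func(*args, **kwargs)
        except PreconditionError:
            hypothesis.reject()
    return inner

Applied to a contract-checked function inside a @given test, inputs that trip the precondition are silently discarded instead of failing the run.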
Example #56
 def track(x):
     tracker.add(x)
     reject()
Example #57
 def nope(x):
     reject()
Example #58
 def test(x):
     reject()
Example #59
 def this_has_a_totally_unique_name(x):
     reject()
Example #60
 def predicate(x):
     reject()