Esempio n. 1
0
def validate_health_check_suppressions(suppressions):
    """Coerce *suppressions* to a list and reject any non-HealthCheck entry.

    Raises InvalidArgument if an element is not a HealthCheck member;
    returns the coerced list otherwise.
    """
    suppressions = try_convert(list, suppressions, "suppress_health_check")
    for entry in suppressions:
        if isinstance(entry, HealthCheck):
            continue
        raise InvalidArgument(
            "Non-HealthCheck value %r of type %s is invalid in suppress_health_check."
            % (entry, type(entry).__name__)
        )
    return suppressions
Esempio n. 2
0
def _ensure_positive_int(x, name, since, min_value=0):
    """Coerce *x* to an int no smaller than *min_value*.

    Emits a deprecation note (dated *since*) for non-integer input, and
    raises InvalidArgument when the coerced value is below *min_value*.
    """
    if not isinstance(x, integer_types):
        note_deprecation(
            "Passing non-integer %s=%r is deprecated" % (name, x), since=since
        )
    value = try_convert(int, x, name)
    if value < min_value:
        raise InvalidArgument(
            "%s=%r must be at least %r." % (name, value, min_value)
        )
    return value
def _ensure_positive_int(x, name, since, min_value=0):
    """Validate an integer argument, returning it coerced to int.

    Non-integer input triggers a deprecation note dated *since*; values
    below *min_value* raise InvalidArgument.
    """
    if not isinstance(x, integer_types):
        msg = "Passing non-integer %s=%r is deprecated" % (name, x)
        note_deprecation(msg, since=since)
    x = try_convert(int, x, name)
    if x < min_value:
        raise InvalidArgument("%s=%r must be at least %r." % (name, x, min_value))
    return x
Esempio n. 4
0
def elements_and_dtype(elements, dtype, source=None):
    """Validate and normalise an (elements, dtype) pair for pandas strategies.

    At least one of *elements* (a strategy) and *dtype* must be provided;
    *source* is an optional prefix used to build argument names in error
    messages.  Returns ``(elements, dtype)`` with *dtype* converted via
    ``np.dtype`` and *elements* defaulting to ``npst.from_dtype(dtype)``.
    Drawn elements are checked to be convertible to *dtype*.
    """
    prefix = "" if source is None else f"{source}."

    if elements is not None:
        check_strategy(elements, f"{prefix}elements")
    else:
        with check("dtype is not None"):
            if dtype is None:
                raise InvalidArgument(
                    f"At least one of {prefix}elements or {prefix}dtype must be provided."
                )

    with check("is_categorical_dtype"):
        if is_categorical_dtype(dtype):
            raise InvalidArgument(
                f"{prefix}dtype is categorical, which is currently unsupported"
            )

    # A bare Python class whose numpy conversion would be object-dtype (other
    # than `object` itself) is almost certainly a user mistake - warn before
    # silently treating it as dtype=object.
    if isinstance(dtype, type) and np.dtype(dtype).kind == "O" and dtype is not object:
        note_deprecation(
            f"Passed dtype={dtype!r} is not a valid Pandas dtype.  We'll treat it as "
            "dtype=object for now, but this will be an error in a future version.",
            since="2021-12-31",
            has_codemod=False,
        )

    dtype = try_convert(np.dtype, dtype, "dtype")

    if elements is None:
        elements = npst.from_dtype(dtype)
    elif dtype is not None:

        def convert_element(value):
            # Verify each drawn element is representable as `dtype`.
            name = f"draw({prefix}elements)"
            try:
                return np.array([value], dtype=dtype)[0]
            except TypeError:
                # f-string here for consistency with the ValueError branch;
                # the rendered message is unchanged.
                raise InvalidArgument(
                    f"Cannot convert {name}={value!r} of type "
                    f"{type(value).__name__} to dtype {dtype.str}"
                ) from None
            except ValueError:
                raise InvalidArgument(
                    f"Cannot convert {name}={value!r} to type {dtype.str}"
                ) from None

        elements = elements.map(convert_element)
    assert elements is not None

    return elements, dtype
Esempio n. 5
0
def elements_and_dtype(elements, dtype, source=None):
    """Validate and normalise an (elements, dtype) pair for pandas strategies.

    At least one of *elements* (a strategy) and *dtype* must be provided;
    *source* is an optional prefix used when naming arguments in error
    messages.  Returns (elements, dtype) with dtype converted via np.dtype
    and elements defaulting to npst.from_dtype(dtype).
    """

    if source is None:
        prefix = ""
    else:
        prefix = "%s." % (source,)

    if elements is not None:
        st.check_strategy(elements, "%selements" % (prefix,))
    else:
        with check("dtype is not None"):
            if dtype is None:
                raise InvalidArgument(
                    (
                        "At least one of %(prefix)selements or %(prefix)sdtype "
                        "must be provided."
                    )
                    % {"prefix": prefix}
                )

    with check("is_categorical_dtype"):
        if is_categorical_dtype(dtype):
            raise InvalidArgument(
                "%sdtype is categorical, which is currently unsupported" % (prefix,)
            )

    dtype = try_convert(np.dtype, dtype, "dtype")

    if elements is None:
        elements = npst.from_dtype(dtype)
    elif dtype is not None:

        def convert_element(value):
            # Verify each drawn element is representable as `dtype`.
            name = "draw(%selements)" % (prefix,)
            try:
                return np.array([value], dtype=dtype)[0]
            except TypeError:
                # `from None` suppresses the unhelpful chained numpy
                # traceback ("During handling of the above exception...").
                raise InvalidArgument(
                    "Cannot convert %s=%r of type %s to dtype %s"
                    % (name, value, type(value).__name__, dtype.str)
                ) from None
            except ValueError:
                raise InvalidArgument(
                    "Cannot convert %s=%r to type %s" % (name, value, dtype.str)
                ) from None

        elements = elements.map(convert_element)
    assert elements is not None

    return elements, dtype
Esempio n. 6
0
def validate_health_check_suppressions(suppressions):
    """Normalise suppress_health_check to a list of HealthCheck members.

    Raises InvalidArgument for any non-HealthCheck entry, and emits a
    deprecation note for the no-op HealthCheck.hung_test suppression.
    """
    suppressions = try_convert(list, suppressions, "suppress_health_check")
    for entry in suppressions:
        if isinstance(entry, HealthCheck):
            if entry is HealthCheck.hung_test:
                note_deprecation(
                    "HealthCheck.hung_test is deprecated and has no effect, "
                    "as we no longer run this health check.",
                    since="2019-01-24",
                )
            continue
        raise InvalidArgument(
            "Non-HealthCheck value %r of type %s is invalid in suppress_health_check."
            % (entry, type(entry).__name__)
        )
    return suppressions
Esempio n. 7
0
def elements_and_dtype(elements, dtype, source=None):
    """Validate/normalise the (elements, dtype) pair shared by pandas strategies.

    Requires at least one of *elements* and *dtype*; *source* prefixes
    argument names in error messages.  Returns the normalised pair, with
    drawn elements mapped through a dtype-conversion check.
    """

    if source is None:
        prefix = ""
    else:
        prefix = "%s." % (source,)

    if elements is not None:
        st.check_strategy(elements, "%selements" % (prefix,))
    else:
        with check("dtype is not None"):
            if dtype is None:
                raise InvalidArgument(
                    (
                        "At least one of %(prefix)selements or %(prefix)sdtype "
                        "must be provided."
                    )
                    % {"prefix": prefix}
                )

    with check("is_categorical_dtype"):
        if is_categorical_dtype(dtype):
            raise InvalidArgument(
                "%sdtype is categorical, which is currently unsupported" % (prefix,)
            )

    dtype = try_convert(np.dtype, dtype, "dtype")

    if elements is None:
        elements = npst.from_dtype(dtype)
    elif dtype is not None:

        def convert_element(value):
            name = "draw(%selements)" % (prefix,)
            try:
                return np.array([value], dtype=dtype)[0]
            except TypeError:
                # `from None` hides numpy's internal traceback, which only
                # obscures the actual problem (an unconvertible element).
                raise InvalidArgument(
                    "Cannot convert %s=%r of type %s to dtype %s"
                    % (name, value, type(value).__name__, dtype.str)
                ) from None
            except ValueError:
                raise InvalidArgument(
                    "Cannot convert %s=%r to type %s" % (name, value, dtype.str)
                ) from None

        elements = elements.map(convert_element)
    assert elements is not None

    return elements, dtype
def validate_health_check_suppressions(suppressions):
    """Validate the suppress_health_check setting value.

    Returns the value coerced to a list; raises InvalidArgument for
    non-HealthCheck entries and deprecates suppressing hung_test.
    """
    suppressions = try_convert(list, suppressions, "suppress_health_check")
    for item in suppressions:
        if not isinstance(item, HealthCheck):
            kind = type(item).__name__
            raise InvalidArgument(
                "Non-HealthCheck value %r of type %s is invalid in suppress_health_check."
                % (item, kind)
            )
        elif item is HealthCheck.hung_test:
            note_deprecation(
                "HealthCheck.hung_test is deprecated and has no "
                "effect, as we no longer run this health check.",
                since="2019-01-24",
            )
    return suppressions
def elements_and_dtype(elements, dtype, source=None):
    """Validate and normalise an (elements, dtype) pair.

    At least one of *elements* and *dtype* must be provided; *source*
    prefixes argument names in error messages.  Returns the pair with
    dtype converted via np.dtype and elements defaulting to
    npst.from_dtype(dtype).
    """

    if source is None:
        prefix = ''
    else:
        prefix = '%s.' % (source, )

    if elements is not None:
        check_strategy(elements, '%selements' % (prefix, ))
    else:
        with check('dtype is not None'):
            if dtype is None:
                raise InvalidArgument(
                    ('At least one of %(prefix)selements or %(prefix)sdtype '
                     'must be provided.') % {'prefix': prefix})

    with check('is_categorical_dtype'):
        if is_categorical_dtype(dtype):
            raise InvalidArgument(
                '%sdtype is categorical, which is currently unsupported' %
                (prefix, ))

    dtype = try_convert(np.dtype, dtype, 'dtype')

    if elements is None:
        elements = npst.from_dtype(dtype)
    elif dtype is not None:

        def convert_element(value):
            # Verify each drawn element is representable as `dtype`.
            name = 'draw(%selements)' % (prefix, )
            try:
                return np.array([value], dtype=dtype)[0]
            except TypeError:
                # `from None` avoids chaining numpy's internal traceback
                # onto the user-facing error.
                raise InvalidArgument(
                    'Cannot convert %s=%r of type %s to dtype %s' %
                    (name, value, type(value).__name__, dtype.str)) from None
            except ValueError:
                raise InvalidArgument('Cannot convert %s=%r to type %s' % (
                    name,
                    value,
                    dtype.str,
                )) from None

        elements = elements.map(convert_element)
    assert elements is not None

    return elements, dtype
Esempio n. 10
0
def elements_and_dtype(elements, dtype, source=None):
    """Validate and normalise an (elements, dtype) pair for pandas strategies.

    At least one of *elements* (a strategy) and *dtype* must be provided;
    *source* is an optional prefix used to build argument names in error
    messages.  Returns ``(elements, dtype)`` with *dtype* converted via
    ``np.dtype`` and *elements* defaulting to ``npst.from_dtype(dtype)``.
    """
    prefix = "" if source is None else f"{source}."

    if elements is not None:
        check_strategy(elements, f"{prefix}elements")
    else:
        with check("dtype is not None"):
            if dtype is None:
                raise InvalidArgument(
                    f"At least one of {prefix}elements or {prefix}dtype must be provided."
                )

    with check("is_categorical_dtype"):
        if is_categorical_dtype(dtype):
            raise InvalidArgument(
                f"{prefix}dtype is categorical, which is currently unsupported"
            )

    dtype = try_convert(np.dtype, dtype, "dtype")

    if elements is None:
        elements = npst.from_dtype(dtype)
    elif dtype is not None:

        def convert_element(value):
            # Verify each drawn element is representable as `dtype`.
            name = f"draw({prefix}elements)"
            try:
                return np.array([value], dtype=dtype)[0]
            except TypeError:
                # f-string here for consistency with the ValueError branch;
                # the rendered message is unchanged.
                raise InvalidArgument(
                    f"Cannot convert {name}={value!r} of type "
                    f"{type(value).__name__} to dtype {dtype.str}"
                ) from None
            except ValueError:
                raise InvalidArgument(
                    f"Cannot convert {name}={value!r} to type {dtype.str}"
                ) from None

        elements = elements.map(convert_element)
    assert elements is not None

    return elements, dtype
def validate_health_check_suppressions(suppressions):
    """Coerce suppress_health_check to a list, deprecating bad entries.

    Non-HealthCheck values and the obsolete exception_in_generation /
    random_module checks are noted as deprecated rather than rejected.
    """
    suppressions = try_convert(list, suppressions, 'suppress_health_check')
    obsolete = (HealthCheck.exception_in_generation, HealthCheck.random_module)
    for entry in suppressions:
        if not isinstance(entry, HealthCheck):
            note_deprecation(
                'Non-HealthCheck value %r of type %s in suppress_health_check '
                'will be ignored, and will become an error in a future '
                'version of Hypothesis' % (entry, type(entry).__name__))
        elif entry in obsolete:
            note_deprecation(
                '%s is now ignored and suppressing it is a no-op. This will '
                'become an error in a future version of Hypothesis. Simply '
                'remove it from your list of suppressions to get the same '
                'effect.' % (entry,))
    return suppressions
Esempio n. 12
0
def validate_health_check_suppressions(suppressions):
    """Normalise the suppress_health_check argument to a list.

    Emits deprecation notes for non-HealthCheck values and for checks
    (exception_in_generation, random_module) that no longer run.
    """
    suppressions = try_convert(list, suppressions, 'suppress_health_check')
    for s in suppressions:
        if isinstance(s, HealthCheck):
            if s in (HealthCheck.exception_in_generation,
                     HealthCheck.random_module):
                note_deprecation(
                    '%s is now ignored and suppressing it is a no-op. This '
                    'will become an error in a future version of Hypothesis. '
                    'Simply remove it from your list of suppressions to get '
                    'the same effect.' % (s,))
            continue
        note_deprecation(
            'Non-HealthCheck value %r of type %s in suppress_health_check '
            'will be ignored, and will become an error in a future '
            'version of Hypothesis' % (s, type(s).__name__))
    return suppressions
Esempio n. 13
0
def validate_health_check_suppressions(suppressions):
    """Coerce suppress_health_check to a list, noting deprecated entries.

    Both non-HealthCheck values and the obsolete exception_in_generation /
    random_module checks produce deprecation notes (since 2017-11-11).
    """
    suppressions = try_convert(list, suppressions, "suppress_health_check")
    obsolete = (HealthCheck.exception_in_generation, HealthCheck.random_module)
    for value in suppressions:
        if not isinstance(value, HealthCheck):
            message = (
                "Non-HealthCheck value %r of type %s in suppress_health_check "
                "will be ignored, and will become an error in a future "
                "version of Hypothesis" % (value, type(value).__name__)
            )
            note_deprecation(message, since="2017-11-11")
        elif value in obsolete:
            note_deprecation(
                "%s is now ignored and suppressing it is a no-op. This will "
                "become an error in a future version of Hypothesis. Simply "
                "remove it from your list of suppressions to get the same "
                "effect." % (value,),
                since="2017-11-11",
            )
    return suppressions
        def assign_rows(draw):
            """Draw a DataFrame by drawing each row from `rows`.

            Rows may be dicts (keyed by column name) or sequences; missing
            entries are filled from the column fill strategies, and
            uniqueness constraints are enforced by rejection sampling
            (at most 5 attempts per row).
            """
            index = draw(index_strategy)

            # Pre-allocate zero-filled columns so that assigning rows below
            # cannot change the column dtypes.
            result = pandas.DataFrame(OrderedDict(
                (c.name,
                 pandas.Series(np.zeros(dtype=c.dtype, shape=len(index)),
                               dtype=c.dtype)) for c in rewritten_columns),
                                      index=index)

            fills = {}

            any_unique = any(c.unique for c in rewritten_columns)

            if any_unique:
                # One seen-set per unique column; None marks non-unique
                # slots, and trailing Nones are dropped.
                all_seen = [
                    set() if c.unique else None for c in rewritten_columns
                ]
                while all_seen[-1] is None:
                    all_seen.pop()

            for row_index in hrange(len(index)):
                for _ in hrange(5):
                    original_row = draw(rows)
                    row = original_row
                    if isinstance(row, dict):
                        as_list = [None] * len(rewritten_columns)
                        for i, c in enumerate(rewritten_columns):
                            try:
                                as_list[i] = row[c.name]
                            except KeyError:
                                # Missing key: draw one fill value per column
                                # and reuse it for subsequent rows.
                                try:
                                    as_list[i] = fills[i]
                                except KeyError:
                                    fills[i] = draw(c.fill)
                                    as_list[i] = fills[i]
                        for k in row:
                            if k not in column_names:
                                # Fixed: message previously ended with a
                                # stray ')' after the column list.
                                raise InvalidArgument(
                                    ('Row %r contains column %r not in '
                                     'columns %r' %
                                     (row, k,
                                      [c.name for c in rewritten_columns])))
                        row = as_list
                    if any_unique:
                        has_duplicate = False
                        for seen, value in zip(all_seen, row):
                            if seen is None:
                                continue
                            if value in seen:
                                has_duplicate = True
                                break
                            seen.add(value)
                        if has_duplicate:
                            # Retry this row (up to the 5-attempt budget).
                            continue
                    row = list(try_convert(tuple, row, 'draw(rows)'))

                    if len(row) > len(rewritten_columns):
                        raise InvalidArgument(
                            ('Row %r contains too many entries. Has %d but '
                             'expected at most %d') %
                            (original_row, len(row), len(rewritten_columns)))
                    while len(row) < len(rewritten_columns):
                        row.append(draw(rewritten_columns[len(row)].fill))
                    result.iloc[row_index] = row
                    break
                else:
                    # Five consecutive rows violated uniqueness - reject the
                    # whole example.
                    reject()
            return result
def data_frames(columns=None, rows=None, index=None):
    """Provides a strategy for producing a :class:`pandas.DataFrame`.

    Arguments:

    * columns: An iterable of :class:`column` objects describing the shape
      of the generated DataFrame.

    * rows: A strategy for generating a row object. Should generate
      either dicts mapping column names to values or a sequence mapping
      column position to the value in that position (note that unlike the
      :class:`pandas.DataFrame` constructor, single values are not allowed
      here. Passing e.g. an integer is an error, even if there is only one
      column).

      At least one of rows and columns must be provided. If both are
      provided then the generated rows will be validated against the
      columns and an error will be raised if they don't match.

      Caveats on using rows:

      * In general you should prefer using columns to rows, and only use
        rows if the columns interface is insufficiently flexible to
        describe what you need - you will get better performance and
        example quality that way.
      * If you provide rows and not columns, then the shape and dtype of
        the resulting DataFrame may vary. e.g. if you have a mix of int
        and float in the values for one column in your row entries, the
        column will sometimes have an integral dtype and sometimes a float.

    * index: If not None, a strategy for generating indexes for the
      resulting DataFrame. This can generate either :class:`pandas.Index`
      objects or any sequence of values (which will be passed to the
      Index constructor).

      You will probably find it most convenient to use the
      :func:`~hypothesis.extra.pandas.indexes` or
      :func:`~hypothesis.extra.pandas.range_indexes` function to produce
      values for this argument.

    Usage:

    The expected usage pattern is that you use :class:`column` and
    :func:`columns` to specify a fixed shape of the DataFrame you want as
    follows. For example the following gives a two column data frame:

    .. code-block:: pycon

        >>> from hypothesis.extra.pandas import column, data_frames
        >>> data_frames([
        ... column('A', dtype=int), column('B', dtype=float)]).example()
                    A              B
        0  2021915903  1.793898e+232
        1  1146643993            inf
        2 -2096165693   1.000000e+07

    If you want the values in different columns to interact in some way you
    can use the rows argument. For example the following gives a two column
    DataFrame where the value in the first column is always at most the value
    in the second:

    .. code-block:: pycon

        >>> from hypothesis.extra.pandas import column, data_frames
        >>> import hypothesis.strategies as st
        >>> data_frames(
        ...     rows=st.tuples(st.floats(allow_nan=False),
        ...                    st.floats(allow_nan=False)).map(sorted)
        ... ).example()
                       0             1
        0  -3.402823e+38  9.007199e+15
        1 -1.562796e-298  5.000000e-01

    You can also combine the two:

    .. code-block:: pycon

        >>> from hypothesis.extra.pandas import columns, data_frames
        >>> import hypothesis.strategies as st
        >>> data_frames(
        ...     columns=columns(["lo", "hi"], dtype=float),
        ...     rows=st.tuples(st.floats(allow_nan=False),
        ...                    st.floats(allow_nan=False)).map(sorted)
        ... ).example()
                 lo            hi
        0   9.314723e-49  4.353037e+45
        1  -9.999900e-01  1.000000e+07
        2 -2.152861e+134 -1.069317e-73

    (Note that the column dtype must still be specified and will not be
    inferred from the rows. This restriction may be lifted in future).

    Combining rows and columns has the following behaviour:

    * The column names and dtypes will be used.
    * If the column is required to be unique, this will be enforced.
    * Any values missing from the generated rows will be provided using the
      column's fill.
    * Any values in the row not present in the column specification (if
      dicts are passed, if there are keys with no corresponding column name,
      if sequences are passed if there are too many items) will result in
      InvalidArgument being raised.

    """

    if index is None:
        index = range_indexes()
    else:
        check_strategy(index)

    index_strategy = index

    if columns is None:
        if rows is None:
            raise InvalidArgument(
                'At least one of rows and columns must be provided')
        else:
            # Rows-only mode: the frame's shape is determined entirely by
            # the drawn rows.

            @st.composite
            def rows_only(draw):
                index = draw(index_strategy)

                @check_function
                def row():
                    result = draw(rows)
                    check_type(Iterable, result, 'draw(row)')
                    return result

                if len(index) > 0:
                    return pandas.DataFrame([row() for _ in index],
                                            index=index)
                else:
                    # If we haven't drawn any rows we need to draw one row and
                    # then discard it so that we get a consistent shape for the
                    # DataFrame.
                    base = pandas.DataFrame([row()])
                    return base.drop(0)

            return rows_only()

    assert columns is not None
    columns = try_convert(tuple, columns, 'columns')

    # Normalise the column specs: assign positional names where missing,
    # validate name hashability/uniqueness, and resolve each column's
    # elements / dtype / fill strategies.
    rewritten_columns = []
    column_names = set()

    for i, c in enumerate(columns):
        check_type(column, c, 'columns[%d]' % (i, ))

        c = copy(c)
        if c.name is None:
            label = 'columns[%d]' % (i, )
            c.name = i
        else:
            label = c.name
            try:
                hash(c.name)
            except TypeError:
                raise InvalidArgument(
                    'Column names must be hashable, but columns[%d].name was '
                    '%r of type %s, which cannot be hashed.' % (
                        i,
                        c.name,
                        type(c.name).__name__,
                    ))

        if c.name in column_names:
            raise InvalidArgument('duplicate definition of column name %r' %
                                  (c.name, ))

        column_names.add(c.name)

        c.elements, c.dtype = elements_and_dtype(c.elements, c.dtype, label)

        if c.dtype is None and rows is not None:
            raise InvalidArgument(
                'Must specify a dtype for all columns when combining rows with'
                ' columns.')

        c.fill = npst.fill_for(fill=c.fill,
                               elements=c.elements,
                               unique=c.unique,
                               name=label)

        rewritten_columns.append(c)

    if rows is None:

        @st.composite
        def just_draw_columns(draw):
            index = draw(index_strategy)
            local_index_strategy = st.just(index)

            data = OrderedDict((c.name, None) for c in rewritten_columns)

            # Depending on how the columns are going to be generated we group
            # them differently to get better shrinking. For columns with fill
            # enabled, the elements can be shrunk independently of the size,
            # so we can just shrink by shrinking the index then shrinking the
            # length and are generally much more free to move data around.

            # For columns with no filling the problem is harder, and drawing
            # them like that would result in rows being very far apart from
            # each other in the underlying data stream, which gets in the way
            # of shrinking. So what we do is reorder and draw those columns
            # row wise, so that the values of each row are next to each other.
            # This makes life easier for the shrinker when deleting blocks of
            # data.
            columns_without_fill = [
                c for c in rewritten_columns if c.fill.is_empty
            ]

            if columns_without_fill:
                for c in columns_without_fill:
                    data[c.name] = pandas.Series(
                        np.zeros(shape=len(index), dtype=c.dtype),
                        index=index,
                    )
                seen = {
                    c.name: set()
                    for c in columns_without_fill if c.unique
                }

                for i in hrange(len(index)):
                    for c in columns_without_fill:
                        if c.unique:
                            # Rejection-sample up to 5 times for a value not
                            # already used in this column.
                            for _ in range(5):
                                value = draw(c.elements)
                                if value not in seen[c.name]:
                                    seen[c.name].add(value)
                                    break
                            else:
                                reject()
                        else:
                            value = draw(c.elements)
                        data[c.name][i] = value

            for c in rewritten_columns:
                if not c.fill.is_empty:
                    data[c.name] = draw(
                        series(index=local_index_strategy,
                               dtype=c.dtype,
                               elements=c.elements,
                               fill=c.fill,
                               unique=c.unique))

            return pandas.DataFrame(data, index=index)

        return just_draw_columns()
    else:

        @st.composite
        def assign_rows(draw):
            index = draw(index_strategy)

            # Pre-allocate zero-filled columns so assigning rows below
            # cannot change the column dtypes.
            result = pandas.DataFrame(OrderedDict(
                (c.name,
                 pandas.Series(np.zeros(dtype=c.dtype, shape=len(index)),
                               dtype=c.dtype)) for c in rewritten_columns),
                                      index=index)

            fills = {}

            any_unique = any(c.unique for c in rewritten_columns)

            if any_unique:
                # One seen-set per unique column; None marks non-unique
                # slots, and trailing Nones are dropped.
                all_seen = [
                    set() if c.unique else None for c in rewritten_columns
                ]
                while all_seen[-1] is None:
                    all_seen.pop()

            for row_index in hrange(len(index)):
                for _ in hrange(5):
                    original_row = draw(rows)
                    row = original_row
                    if isinstance(row, dict):
                        as_list = [None] * len(rewritten_columns)
                        for i, c in enumerate(rewritten_columns):
                            try:
                                as_list[i] = row[c.name]
                            except KeyError:
                                # Missing key: draw one fill value per column
                                # and reuse it for subsequent rows.
                                try:
                                    as_list[i] = fills[i]
                                except KeyError:
                                    fills[i] = draw(c.fill)
                                    as_list[i] = fills[i]
                        for k in row:
                            if k not in column_names:
                                # Fixed: message previously ended with a
                                # stray ')' after the column list.
                                raise InvalidArgument(
                                    ('Row %r contains column %r not in '
                                     'columns %r' %
                                     (row, k,
                                      [c.name for c in rewritten_columns])))
                        row = as_list
                    if any_unique:
                        has_duplicate = False
                        for seen, value in zip(all_seen, row):
                            if seen is None:
                                continue
                            if value in seen:
                                has_duplicate = True
                                break
                            seen.add(value)
                        if has_duplicate:
                            continue
                    row = list(try_convert(tuple, row, 'draw(rows)'))

                    if len(row) > len(rewritten_columns):
                        raise InvalidArgument(
                            ('Row %r contains too many entries. Has %d but '
                             'expected at most %d') %
                            (original_row, len(row), len(rewritten_columns)))
                    while len(row) < len(rewritten_columns):
                        row.append(draw(rewritten_columns[len(row)].fill))
                    result.iloc[row_index] = row
                    break
                else:
                    # Five consecutive rows violated uniqueness - reject
                    # the whole example.
                    reject()
            return result

        return assign_rows()
Esempio n. 16
0
def data_frames(
    columns=None,  # type: Sequence[column]
    rows=None,  # type: st.SearchStrategy[Union[dict, Sequence[Any]]]
    index=None,  # type: st.SearchStrategy[Ex]
):
    # type: (...) -> st.SearchStrategy[pandas.DataFrame]
    """Provides a strategy for producing a :class:`pandas.DataFrame`.

    Arguments:

    * columns: An iterable of :class:`column` objects describing the shape
      of the generated DataFrame.

    * rows: A strategy for generating a row object. Should generate
      either dicts mapping column names to values or a sequence mapping
      column position to the value in that position (note that unlike the
      :class:`pandas.DataFrame` constructor, single values are not allowed
      here. Passing e.g. an integer is an error, even if there is only one
      column).

      At least one of rows and columns must be provided. If both are
      provided then the generated rows will be validated against the
      columns and an error will be raised if they don't match.

      Caveats on using rows:

      * In general you should prefer using columns to rows, and only use
        rows if the columns interface is insufficiently flexible to
        describe what you need - you will get better performance and
        example quality that way.
      * If you provide rows and not columns, then the shape and dtype of
        the resulting DataFrame may vary. e.g. if you have a mix of int
        and float in the values for one column in your row entries, the
        column will sometimes have an integral dtype and sometimes a float.

    * index: If not None, a strategy for generating indexes for the
      resulting DataFrame. This can generate either :class:`pandas.Index`
      objects or any sequence of values (which will be passed to the
      Index constructor).

      You will probably find it most convenient to use the
      :func:`~hypothesis.extra.pandas.indexes` or
      :func:`~hypothesis.extra.pandas.range_indexes` function to produce
      values for this argument.

    Usage:

    The expected usage pattern is that you use :class:`column` and
    :func:`columns` to specify a fixed shape of the DataFrame you want as
    follows. For example the following gives a two column data frame:

    .. code-block:: pycon

        >>> from hypothesis.extra.pandas import column, data_frames
        >>> data_frames([
        ... column('A', dtype=int), column('B', dtype=float)]).example()
                    A              B
        0  2021915903  1.793898e+232
        1  1146643993            inf
        2 -2096165693   1.000000e+07

    If you want the values in different columns to interact in some way you
    can use the rows argument. For example the following gives a two column
    DataFrame where the value in the first column is always at most the value
    in the second:

    .. code-block:: pycon

        >>> from hypothesis.extra.pandas import column, data_frames
        >>> import hypothesis.strategies as st
        >>> data_frames(
        ...     rows=st.tuples(st.floats(allow_nan=False),
        ...                    st.floats(allow_nan=False)).map(sorted)
        ... ).example()
                       0             1
        0  -3.402823e+38  9.007199e+15
        1 -1.562796e-298  5.000000e-01

    You can also combine the two:

    .. code-block:: pycon

        >>> from hypothesis.extra.pandas import columns, data_frames
        >>> import hypothesis.strategies as st
        >>> data_frames(
        ...     columns=columns(["lo", "hi"], dtype=float),
        ...     rows=st.tuples(st.floats(allow_nan=False),
        ...                    st.floats(allow_nan=False)).map(sorted)
        ... ).example()
                 lo            hi
        0   9.314723e-49  4.353037e+45
        1  -9.999900e-01  1.000000e+07
        2 -2.152861e+134 -1.069317e-73

    (Note that the column dtype must still be specified and will not be
    inferred from the rows. This restriction may be lifted in future).

    Combining rows and columns has the following behaviour:

    * The column names and dtypes will be used.
    * If the column is required to be unique, this will be enforced.
    * Any values missing from the generated rows will be provided using the
      column's fill.
    * Any values in the row not present in the column specification (if
      dicts are passed, if there are keys with no corresponding column name,
      if sequences are passed if there are too many items) will result in
      InvalidArgument being raised.
    """
    if index is None:
        index = range_indexes()
    else:
        st.check_strategy(index)

    index_strategy = index

    if columns is None:
        if rows is None:
            raise InvalidArgument("At least one of rows and columns must be provided")
        else:

            @st.composite
            def rows_only(draw):
                index = draw(index_strategy)

                @check_function
                def row():
                    result = draw(rows)
                    check_type(Iterable, result, "draw(row)")
                    return result

                if len(index) > 0:
                    return pandas.DataFrame([row() for _ in index], index=index)
                else:
                    # If we haven't drawn any rows we need to draw one row and
                    # then discard it so that we get a consistent shape for the
                    # DataFrame.
                    base = pandas.DataFrame([row()])
                    return base.drop(0)

            return rows_only()

    assert columns is not None
    cols = try_convert(tuple, columns, "columns")  # type: Sequence[column]

    # Validate and normalise the column specifications: ensure hashable,
    # unique names; resolve elements/dtype; and attach a fill strategy.
    rewritten_columns = []
    column_names = set()  # type: Set[str]

    for i, c in enumerate(cols):
        check_type(column, c, "columns[%d]" % (i,))

        c = copy(c)
        if c.name is None:
            label = "columns[%d]" % (i,)
            c.name = i
        else:
            label = c.name
            try:
                hash(c.name)
            except TypeError:
                raise InvalidArgument(
                    "Column names must be hashable, but columns[%d].name was "
                    "%r of type %s, which cannot be hashed."
                    % (i, c.name, type(c.name).__name__)
                )

        if c.name in column_names:
            raise InvalidArgument("duplicate definition of column name %r" % (c.name,))

        column_names.add(c.name)

        c.elements, c.dtype = elements_and_dtype(c.elements, c.dtype, label)

        if c.dtype is None and rows is not None:
            raise InvalidArgument(
                "Must specify a dtype for all columns when combining rows with"
                " columns."
            )

        c.fill = npst.fill_for(
            fill=c.fill, elements=c.elements, unique=c.unique, name=label
        )

        rewritten_columns.append(c)

    if rows is None:

        @st.composite
        def just_draw_columns(draw):
            index = draw(index_strategy)
            local_index_strategy = st.just(index)

            data = OrderedDict((c.name, None) for c in rewritten_columns)

            # Depending on how the columns are going to be generated we group
            # them differently to get better shrinking. For columns with fill
            # enabled, the elements can be shrunk independently of the size,
            # so we can just shrink by shrinking the index then shrinking the
            # length and are generally much more free to move data around.

            # For columns with no filling the problem is harder, and drawing
            # them like that would result in rows being very far apart from
            # each other in the underlying data stream, which gets in the way
            # of shrinking. So what we do is reorder and draw those columns
            # row wise, so that the values of each row are next to each other.
            # This makes life easier for the shrinker when deleting blocks of
            # data.
            columns_without_fill = [c for c in rewritten_columns if c.fill.is_empty]

            if columns_without_fill:
                for c in columns_without_fill:
                    data[c.name] = pandas.Series(
                        np.zeros(shape=len(index), dtype=c.dtype), index=index
                    )
                seen = {c.name: set() for c in columns_without_fill if c.unique}

                for i in hrange(len(index)):
                    for c in columns_without_fill:
                        if c.unique:
                            # Retry a few times to draw a fresh unique value
                            # before rejecting the whole example.
                            # (hrange for consistency with the row-retry loop.)
                            for _ in hrange(5):
                                value = draw(c.elements)
                                if value not in seen[c.name]:
                                    seen[c.name].add(value)
                                    break
                            else:
                                reject()
                        else:
                            value = draw(c.elements)
                        data[c.name][i] = value

            for c in rewritten_columns:
                if not c.fill.is_empty:
                    data[c.name] = draw(
                        series(
                            index=local_index_strategy,
                            dtype=c.dtype,
                            elements=c.elements,
                            fill=c.fill,
                            unique=c.unique,
                        )
                    )

            return pandas.DataFrame(data, index=index)

        return just_draw_columns()
    else:

        @st.composite
        def assign_rows(draw):
            index = draw(index_strategy)

            # Start from a zero-initialised frame with the right dtypes; rows
            # are then assigned in place one by one.
            result = pandas.DataFrame(
                OrderedDict(
                    (
                        c.name,
                        pandas.Series(
                            np.zeros(dtype=c.dtype, shape=len(index)), dtype=c.dtype
                        ),
                    )
                    for c in rewritten_columns
                ),
                index=index,
            )

            # Fill values drawn lazily, keyed by column position, and reused
            # for every row that is missing that column.
            fills = {}

            any_unique = any(c.unique for c in rewritten_columns)

            if any_unique:
                # Per-column sets of seen values (None for non-unique
                # columns); trailing Nones are dropped so zip() stops early.
                all_seen = [set() if c.unique else None for c in rewritten_columns]
                while all_seen[-1] is None:
                    all_seen.pop()

            for row_index in hrange(len(index)):
                # Retry up to 5 times to draw a row satisfying uniqueness
                # before rejecting the whole example.
                for _ in hrange(5):
                    original_row = draw(rows)
                    row = original_row
                    if isinstance(row, dict):
                        as_list = [None] * len(rewritten_columns)
                        for i, c in enumerate(rewritten_columns):
                            try:
                                as_list[i] = row[c.name]
                            except KeyError:
                                try:
                                    as_list[i] = fills[i]
                                except KeyError:
                                    fills[i] = draw(c.fill)
                                    as_list[i] = fills[i]
                        for k in row:
                            if k not in column_names:
                                raise InvalidArgument(
                                    "Row %r contains column %r not in columns %r"
                                    % (row, k, [c.name for c in rewritten_columns])
                                )
                        row = as_list
                    if any_unique:
                        has_duplicate = False
                        for seen, value in zip(all_seen, row):
                            if seen is None:
                                continue
                            if value in seen:
                                has_duplicate = True
                                break
                            seen.add(value)
                        if has_duplicate:
                            continue
                    row = list(try_convert(tuple, row, "draw(rows)"))

                    if len(row) > len(rewritten_columns):
                        raise InvalidArgument(
                            (
                                "Row %r contains too many entries. Has %d but "
                                "expected at most %d"
                            )
                            % (original_row, len(row), len(rewritten_columns))
                        )
                    # Short rows are padded on the right using column fills.
                    while len(row) < len(rewritten_columns):
                        row.append(draw(rewritten_columns[len(row)].fill))
                    result.iloc[row_index] = row
                    break
                else:
                    reject()
            return result

        return assign_rows()
# Esempio n. 17
        def assign_rows(draw):
            """Draw a DataFrame by generating one row at a time from ``rows``.

            Uses closure variables: ``index_strategy``, ``rewritten_columns``,
            ``column_names``, and the ``rows`` strategy. Dict rows are mapped
            to column positions (missing keys filled from the column's fill
            strategy); sequence rows may be shorter than the column list and
            are padded with fills. Raises InvalidArgument for unknown keys or
            too-long rows; rejects the example when uniqueness cannot be
            satisfied after a few retries.
            """
            index = draw(index_strategy)

            # Start from a zero-initialised frame with the right dtypes; rows
            # are then assigned in place one by one.
            result = pandas.DataFrame(
                OrderedDict(
                    (
                        c.name,
                        pandas.Series(
                            np.zeros(dtype=c.dtype, shape=len(index)), dtype=c.dtype
                        ),
                    )
                    for c in rewritten_columns
                ),
                index=index,
            )

            # Fill values drawn lazily, keyed by column position, and reused
            # for every row that is missing that column.
            fills = {}

            any_unique = any(c.unique for c in rewritten_columns)

            if any_unique:
                # Per-column sets of seen values (None for non-unique
                # columns); trailing Nones are dropped so zip() stops early.
                all_seen = [set() if c.unique else None for c in rewritten_columns]
                while all_seen[-1] is None:
                    all_seen.pop()

            for row_index in hrange(len(index)):
                # Retry up to 5 times to draw a row satisfying uniqueness
                # before rejecting the whole example.
                for _ in hrange(5):
                    original_row = draw(rows)
                    row = original_row
                    if isinstance(row, dict):
                        as_list = [None] * len(rewritten_columns)
                        for i, c in enumerate(rewritten_columns):
                            try:
                                as_list[i] = row[c.name]
                            except KeyError:
                                try:
                                    as_list[i] = fills[i]
                                except KeyError:
                                    fills[i] = draw(c.fill)
                                    as_list[i] = fills[i]
                        for k in row:
                            if k not in column_names:
                                raise InvalidArgument(
                                    "Row %r contains column %r not in columns %r"
                                    % (row, k, [c.name for c in rewritten_columns])
                                )
                        row = as_list
                    if any_unique:
                        has_duplicate = False
                        for seen, value in zip(all_seen, row):
                            if seen is None:
                                continue
                            if value in seen:
                                has_duplicate = True
                                break
                            seen.add(value)
                        if has_duplicate:
                            continue
                    row = list(try_convert(tuple, row, "draw(rows)"))

                    if len(row) > len(rewritten_columns):
                        raise InvalidArgument(
                            (
                                "Row %r contains too many entries. Has %d but "
                                "expected at most %d"
                            )
                            % (original_row, len(row), len(rewritten_columns))
                        )
                    # Short rows are padded on the right using column fills.
                    while len(row) < len(rewritten_columns):
                        row.append(draw(rewritten_columns[len(row)].fill))
                    result.iloc[row_index] = row
                    break
                else:
                    reject()
            return result