Code example #1
File: strategies.py Project: DanRyanIrish/sunpy
def range_time(min_date, max_date=datetime.datetime.utcnow()):
    time = datetimes(
        min_value=datetime.datetime(1960, 1, 1, 0, 0),
        max_value=datetime.datetime(datetime.MAXYEAR, 1, 1, 0, 0),
    )
    time = time.filter(lambda x: min_date < x < max_date)
    return time_attr(time=time)
Code example #2
File: strategies.py Project: Cadair/sunpy
def Times(draw, max_value, min_value):
    time = one_of(datetimes(max_value=max_value, min_value=min_value),
                  TimesLeapsecond)

    time = Time(draw(time))

    return time
Code example #3
File: job.py Project: whitewhim2718/TopChef
def jobs(
        draw,
        ids=uuids(),
        statuses=sampled_from(JobInterface.JobStatus),
        parameters=dictionaries(text(), text()),
        results=dictionaries(text(), text()),
        dates_submitted=datetimes(),
        registration_schemas=dictionaries(text(), text()),
        result_schemas=dictionaries(text(), text())
) -> JobInterface:
    """

    :param draw: A function that can take a strategy and draw a datum from it
    :param ids: A hypothesis strategy (statisticians should read "random
        variable"), that represents the set of all valid job IDs
    :param statuses: A hypothesis strategy that samples from the set of all
        allowed job statuses
    :param parameters: A hypothesis strategy that samples from all job
        parameters
    :param results: A hypothesis strategy that represents the possible results
    :param dates_submitted: A hypothesis strategy that represents the
        possible dates that can be submitted
    :param registration_schemas: The possible job registration schemas
    :param result_schemas: The possible job result schemas
    :return: A randomly-generated implementation of :class:`JobInterface`
    """
    return Job(
        draw(ids), draw(statuses), draw(parameters), draw(results),
        draw(dates_submitted),
        draw(registration_schemas),
        draw(result_schemas)
    )
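Several of the snippets on this page (examples #2, #3, #7, #8 and #9) are bodies of functions written with Hypothesis's composite-strategy pattern, where a draw callable pulls values out of other strategies; the @composite decorator itself was dropped by the snippet extraction. A minimal self-contained sketch of that pattern, using an illustrative intervals strategy that is not taken from any of the listed projects:
from hypothesis import given, strategies as st


@st.composite
def intervals(draw, times=st.datetimes()):
    # Draw two datetimes and return them as an ordered (start, end) pair.
    start = draw(times)
    end = draw(times)
    return min(start, end), max(start, end)


@given(intervals())
def test_interval_is_ordered(interval):
    start, end = interval
    assert start <= end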
Code example #4
def test_overflow_in_simplify():
    # we shouldn't trigger a pytz bug when we're simplifying
    minimal(
        datetimes(
            min_value=dt.datetime.max - dt.timedelta(days=3), timezones=timezones()
        ),
        lambda x: x.tzinfo != pytz.UTC,
    )
Code example #5
def test_bordering_on_a_leap_year():
    x = minimal(
        datetimes(
            dt.datetime.min.replace(year=2003), dt.datetime.max.replace(year=2005)
        ),
        lambda x: x.month == 2 and x.day == 29,
        timeout_after=60,
    )
    assert x.year == 2004
Code example #6
File: test_api_license.py Project: infobyte/faraday
def license_json():
    return st.fixed_dictionaries(
        {
            "lictype": st.one_of(st.none(), st.text()),
            "metadata": st.fixed_dictionaries({
                "update_time": st.floats(),
                "update_user": st.one_of(st.none(), st.text()),
                "update_action": st.integers(),
                "creator": st.one_of(st.none(), st.text()),
                "create_time": st.floats(),
                "update_controller_action": st.one_of(st.none(), st.text()),
            "owner": st.one_of(st.none(), st.text())}),
            "notes": st.one_of(st.none(), st.text()),
            "product": st.one_of(st.none(), st.text()),
            "start": st.datetimes(),
            "end": st.datetimes(),
            "type": st.one_of(st.none(), st.text())
        })
Code example #7
File: test_service.py Project: mithrandi/txacme
def panicing_certs_fixture(draw):
    now = draw(datetimes(
        min_value=datetime(1971, 1, 1), max_value=datetime(2030, 1, 1)))
    panic = timedelta(seconds=draw(
        s.integers(min_value=60, max_value=60 * 60 * 24)))
    certs = dict(
        draw(
            s.lists(
                panicing_cert(now, panic),
                min_size=1,
                unique_by=lambda i: i[0])))
    return AcmeFixture(now=now, panic_interval=panic, certs=certs)
Code example #8
File: strategies.py Project: DanRyanIrish/sunpy
def time_attr(draw, time=datetimes(
    max_value=datetime.datetime(datetime.datetime.utcnow().year, 1, 1, 0, 0),
    min_value=datetime.datetime(1900, 1, 1, 0, 0)
    ),
              delta=timedelta()):
    """
    Create an a.Time that is always positive and doesn't have a massive time
    delta.
    """
    t1 = draw(time)
    t2 = t1 + draw(delta)
    # We can't download data from the future...
    assume(t2 < datetime.datetime.utcnow())

    return a.Time(t1, t2)
Code example #9
File: strategies.py Project: DanRyanIrish/sunpy
def goes_time(draw, time=datetimes(
    max_value=datetime.datetime(datetime.datetime.utcnow().year, 1, 1, 0, 0),
    min_value=datetime.datetime(1981, 1, 1, 0, 0)),
              delta=timedelta()):
    """
    Create an a.Time that is always positive and doesn't have a massive time
    delta.
    """
    t1 = draw(time)
    t2 = t1 + draw(delta)
    # We can't download data from the future.
    assume(t2 < datetime.datetime.utcnow())

    tr = TimeRange(t1, t2)
    # There is no GOES data for this date.
    assume(datetime.datetime(1983, 5, 1, 0, 0, 0) not in tr)
    assume((datetime.datetime(1983, 5, 1) + draw(delta)) not in tr)

    return a.Time(tr)
Code example #10
File: datetime.py Project: Wilfred/hypothesis-python
def datetimes(allow_naive=None, timezones=None, min_year=None, max_year=None):
    """Return a strategy for generating datetimes.

    .. deprecated:: 3.9.0
        use :py:func:`hypothesis.strategies.datetimes` instead.

    allow_naive=True will cause the values to sometimes be naive.
    timezones is the set of permissible timezones. If set to an empty
    collection all datetimes will be naive. If set to None all timezones
    available via pytz will be used.

    All generated datetimes will be between min_year and max_year, inclusive.
    """
    note_deprecation('Use hypothesis.strategies.datetimes, which supports '
                     'full-precision bounds and has a simpler API.')
    min_dt = convert_year_bound(min_year, dt.datetime.min)
    max_dt = convert_year_bound(max_year, dt.datetime.max)
    tzs = tz_args_strat(allow_naive, timezones, 'datetimes')
    return st.datetimes(min_dt, max_dt, tzs)
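The deprecation note above points at hypothesis.strategies.datetimes, which takes full-precision datetime bounds instead of whole years. A short sketch of the replacement call; the bound values are chosen purely for illustration:
import datetime as dt

from hypothesis import strategies as st

# Equivalent of min_year=2000, max_year=2020 in the deprecated API,
# expressed as full datetime bounds.
bounded = st.datetimes(
    min_value=dt.datetime(2000, 1, 1),
    max_value=dt.datetime(2020, 12, 31, 23, 59, 59, 999999),
)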
Code example #11
File: models.py Project: Wilfred/hypothesis-python
def field_mappings():
    global __default_field_mappings

    if __default_field_mappings is None:
        # Sized fields are handled in _get_strategy_for_field()
        # URL fields are not yet handled
        __default_field_mappings = {
            dm.SmallIntegerField: st.integers(-32768, 32767),
            dm.IntegerField: st.integers(-2147483648, 2147483647),
            dm.BigIntegerField:
                st.integers(-9223372036854775808, 9223372036854775807),
            dm.PositiveIntegerField: st.integers(0, 2147483647),
            dm.PositiveSmallIntegerField: st.integers(0, 32767),
            dm.BinaryField: st.binary(),
            dm.BooleanField: st.booleans(),
            dm.DateField: st.dates(),
            dm.DateTimeField: st.datetimes(timezones=get_tz_strat()),
            dm.DurationField: st.timedeltas(),
            dm.EmailField: emails(),
            dm.FloatField: st.floats(),
            dm.NullBooleanField: st.one_of(st.none(), st.booleans()),
            dm.TimeField: st.times(timezones=get_tz_strat()),
            dm.UUIDField: st.uuids(),
        }

        # SQLite does not support timezone-aware times, or timedeltas that
        # don't fit in six bytes of microseconds, so we override those
        db = getattr(django_settings, 'DATABASES', {}).get('default', {})
        if db.get('ENGINE', '').endswith('.sqlite3'):  # pragma: no branch
            sqlite_delta = timedelta(microseconds=2 ** 47 - 1)
            __default_field_mappings.update({
                dm.TimeField: st.times(),
                dm.DurationField: st.timedeltas(-sqlite_delta, sqlite_delta),
            })

    return __default_field_mappings
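The SQLite override above caps DurationField values at what fits into six bytes of signed microseconds. A quick check of that limit, using only the standard library and independent of Django:
from datetime import timedelta

# 2**47 - 1 microseconds is the largest magnitude that fits in six bytes,
# which comes out to a bit under 4.5 years in either direction.
sqlite_delta = timedelta(microseconds=2 ** 47 - 1)
print(sqlite_delta)  # roughly 1628 days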
Code example #12
def test_timezones_arg_to_datetimes_must_be_search_strategy():
    with pytest.raises(InvalidArgument):
        datetimes(timezones=pytz.all_timezones).validate()
    with pytest.raises(InvalidArgument):
        tz = [pytz.timezone(t) for t in pytz.all_timezones]
        datetimes(timezones=tz).validate()
Code example #13
def test_simplifies_towards_millenium():
    d = minimal(datetimes())
    assert d.year == 2000
    assert d.month == d.day == 1
    assert d.hour == d.minute == d.second == d.microsecond == 0
Code example #14
def tag_triple_for_football(draw, dts=st.datetimes(), bools=st.booleans()):
    return ("football", draw(dts), draw(bools))
Code example #15
def test_datetime_bounds_must_be_naive(name, val):
    with pytest.raises(InvalidArgument):
        datetimes(**{name: val}).validate()
Code example #16
def test_datetimes_can_exclude_imaginary(kw):
    # Sanity check: fail unless those days contain an imaginary hour to filter out
    find_any(datetimes(**kw, allow_imaginary=True), lambda x: not datetime_exists(x))

    # Assert that with allow_imaginary=False we only generate existing datetimes.
    assert_all_examples(datetimes(**kw, allow_imaginary=False), datetime_exists)
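The allow_imaginary flag used above controls whether wall-clock times that fall into a DST gap (and so never actually occur in their timezone) may be generated. A brief sketch, assuming the pytz-backed timezones() strategy used elsewhere on this page:
from hypothesis import strategies as st
from hypothesis.extra.pytz import timezones

# Only datetimes that really exist in their assigned timezone; values that
# would land in a "spring forward" gap are excluded by the strategy itself.
existing_only = st.datetimes(timezones=timezones(), allow_imaginary=False)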
Code example #17
class TestToTimeIndexSeries:
    @given(st.lists(st.floats()), st.datetimes(), available_freqs())
    def test_list_as_input(
        self, input_list: pd.Series, start: datetime, freq: pd.Timedelta,
    ):
        time_series_preparation = TimeSeriesPreparation(start=start, freq=freq)
        computed_time_series = time_series_preparation._to_time_index_series(input_list)
        expected_time_series = pandas_series_with_period_index(
            input_list, start, freq=freq
        )
        assert_series_equal(computed_time_series, expected_time_series)

    @given(
        arrays(shape=st.integers(0, 1000), dtype=float),
        st.datetimes(),
        available_freqs(),
    )
    def test_array_as_input(
        self, input_array: np.ndarray, start: datetime, freq: pd.Timedelta,
    ):
        time_series_preparation = TimeSeriesPreparation(start=start, freq=freq)
        computed_time_series = time_series_preparation._to_time_index_series(
            input_array
        )
        expected_time_series = pandas_series_with_period_index(
            input_array, start, freq=freq
        )
        assert_series_equal(computed_time_series, expected_time_series)

    @given(series_with_period_index(), st.datetimes(), available_freqs())
    def test_period_index_series_unchanged(
        self, period_index_series: pd.Series, start: datetime, freq: pd.Timedelta,
    ):
        time_series_preparation = TimeSeriesPreparation(start=start, freq=freq)
        computed_time_series = time_series_preparation._to_time_index_series(
            period_index_series
        )
        assert_series_equal(computed_time_series, period_index_series)

    @given(series_with_datetime_index(), st.datetimes(), available_freqs())
    def test_datetime_index_series_unchanged(
        self, datetime_index_series: pd.Series, start: datetime, freq: pd.Timedelta,
    ):
        time_series_preparation = TimeSeriesPreparation(start=start, freq=freq)
        computed_time_series = time_series_preparation._to_time_index_series(
            datetime_index_series
        )
        assert_series_equal(computed_time_series, datetime_index_series)

    @given(series_with_timedelta_index(), st.datetimes(), available_freqs())
    def test_timedelta_index_series_unchanged(
        self, timedelta_index_series: pd.Series, start: datetime, freq: pd.Timedelta,
    ):
        time_series_preparation = TimeSeriesPreparation(start=start, freq=freq)
        computed_time_series = time_series_preparation._to_time_index_series(
            timedelta_index_series
        )
        assert_series_equal(computed_time_series, timedelta_index_series)

    @given(st.tuples())
    def test_wrong_input_type(self, wrong_input: Tuple):
        time_series_preparation = TimeSeriesPreparation()
        with pytest.raises(TypeError):
            time_series_preparation._to_time_index_series(wrong_input)

    @given(series_with_period_index(), st.datetimes(), available_freqs())
    def test_period_index_dataframe_unchanged(
        self, period_index_series: pd.Series, start: datetime, freq: pd.Timedelta,
    ):
        period_index_dataframe = pd.DataFrame(period_index_series)
        time_series_preparation = TimeSeriesPreparation(start=start, freq=freq)
        computed_time_series = time_series_preparation._to_time_index_series(
            period_index_dataframe
        )
        assert_series_equal(computed_time_series, period_index_series)

    @given(series_with_datetime_index(), st.datetimes(), available_freqs())
    def test_datetime_index_dataframe_unchanged(
        self, datetime_index_series: pd.Series, start: datetime, freq: pd.Timedelta,
    ):
        datetime_index_dataframe = pd.DataFrame(datetime_index_series)
        time_series_preparation = TimeSeriesPreparation(start=start, freq=freq)
        computed_time_series = time_series_preparation._to_time_index_series(
            datetime_index_dataframe
        )
        assert_series_equal(computed_time_series, datetime_index_series)

    @given(series_with_timedelta_index(), st.datetimes(), available_freqs())
    def test_timedelta_index_dataframe_unchanged(
        self, timedelta_index_series: pd.Series, start: datetime, freq: pd.Timedelta,
    ):
        timedelta_index_dataframe = pd.DataFrame(timedelta_index_series)
        time_series_preparation = TimeSeriesPreparation(start=start, freq=freq)
        computed_time_series = time_series_preparation._to_time_index_series(
            timedelta_index_dataframe
        )
        assert_series_equal(computed_time_series, timedelta_index_series)
Code example #18
def test_utc_is_minimal():
    assert tz.UTC is minimal(timezones())


def test_can_generate_non_naive_time():
    assert minimal(times(timezones=timezones()),
                   lambda d: d.tzinfo).tzinfo == tz.UTC


def test_can_generate_non_naive_datetime():
    assert minimal(datetimes(timezones=timezones()),
                   lambda d: d.tzinfo).tzinfo == tz.UTC


@given(datetimes(timezones=timezones()))
def test_timezone_aware_datetimes_are_timezone_aware(dt):
    assert dt.tzinfo is not None


@given(sampled_from(['min_value', 'max_value']),
       datetimes(timezones=timezones()))
def test_datetime_bounds_must_be_naive(name, val):
    with pytest.raises(InvalidArgument):
        datetimes(**{name: val}).validate()


def test_timezones_arg_to_datetimes_must_be_search_strategy():
    all_timezones = zoneinfo.get_zonefile_instance().zones
    with pytest.raises(InvalidArgument):
        datetimes(timezones=all_timezones).validate()
Code example #19
from hypothesis.errors import InvalidArgument
from hypothesis.strategies._internal.datetime import zoneinfo
from tests.common.debug import assert_no_examples, find_any, minimal


def test_utc_is_minimal():
    assert minimal(st.timezones()) is zoneinfo.ZoneInfo("UTC")


def test_can_generate_non_utc():
    find_any(
        st.datetimes(
            timezones=st.timezones()).filter(lambda d: d.tzinfo.key != "UTC"))


@given(st.data(), st.datetimes(), st.datetimes())
def test_datetimes_stay_within_naive_bounds(data, lo, hi):
    if lo > hi:
        lo, hi = hi, lo
    out = data.draw(st.datetimes(lo, hi, timezones=st.timezones()))
    assert lo <= out.replace(tzinfo=None) <= hi


@pytest.mark.parametrize("kwargs", [{"no_cache": 1}])
def test_timezones_argument_validation(kwargs):
    with pytest.raises(InvalidArgument):
        st.timezones(**kwargs).validate()


@pytest.mark.parametrize(
    "kwargs",
Code example #20
    t.reset()
    assert not t.is_expired()
    assert t.is_expired()


@pytest.mark.smoke
@freezegun.freeze_time(auto_tick_seconds=5)
def test_reset_and_set():
    t = Timeout(10000)
    assert not t.is_expired()
    t.reset(5000)
    assert t.is_expired()


@given(timeout=st.integers(min_value=0)
       | st.datetimes(min_value=datetime.datetime.now(),
                      max_value=datetime.datetime(2038, 1, 1)))
@freezegun.freeze_time(auto_tick_seconds=1)
def test_remaining_type(timeout):
    t = Timeout(timeout)
    assert isinstance(t.remaining(), int)


@given(timeout=st.integers(min_value=0)
       | st.datetimes(min_value=datetime.datetime.now(),
                      max_value=datetime.datetime(2038, 1, 1)))
@freezegun.freeze_time(auto_tick_seconds=1)
def test_elapsed_type(timeout):
    t = Timeout(timeout)
    assert isinstance(t.elapsed(), int)

Code example #21
def test_bordering_on_a_leap_year():
    x = minimal(datetimes(dt.datetime.min.replace(year=2003),
                          dt.datetime.max.replace(year=2005)),
                lambda x: x.month == 2 and x.day == 29,
                timeout_after=60)
    assert x.year == 2004
Code example #22
def datetime_strat(field):
    # MongoDB datetimes have only millisecond precision
    return strat.datetimes().map(
        lambda dt: dt.replace(microsecond=(dt.microsecond // 1000 * 1000)))
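The truncation above keeps generated datetimes representable at MongoDB's millisecond precision. A small property-based check of that mapping; the names below are illustrative only:
from hypothesis import given, strategies as st

# Same map as datetime_strat above: drop sub-millisecond microseconds.
mongo_datetimes = st.datetimes().map(
    lambda d: d.replace(microsecond=d.microsecond // 1000 * 1000))


@given(mongo_datetimes)
def test_millisecond_precision(d):
    assert d.microsecond % 1000 == 0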
Code example #23
def complex_datetime_strat(field):
    return strat.datetimes(min_value=datetime.datetime(1900, 1, 1))
Code example #24
File: _fields.py Project: DomiVesalius/StormyBot
def _for_datetime(field):
    if getattr(django.conf.settings, "USE_TZ", False):
        return st.datetimes(timezones=timezones())
    return st.datetimes()
Code example #25
def arrays(draw, type, size=None):
    if isinstance(type, st.SearchStrategy):
        ty = draw(type)
    elif isinstance(type, pa.DataType):
        ty = type
    else:
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    shape = (size, )

    if pa.types.is_list(ty) or pa.types.is_large_list(ty):
        offsets = draw(npst.arrays(np.uint8(), shape=shape)).cumsum() // 20
        offsets = np.insert(offsets, 0, 0, axis=0)  # prepend with zero
        values = draw(arrays(ty.value_type, size=int(offsets.sum())))
        if pa.types.is_large_list(ty):
            array_type = pa.LargeListArray
        else:
            array_type = pa.ListArray
        return array_type.from_arrays(offsets, values)

    if pa.types.is_struct(ty):
        h.assume(len(ty) > 0)
        fields, child_arrays = [], []
        for field in ty:
            fields.append(field)
            child_arrays.append(draw(arrays(field.type, size=size)))
        return pa.StructArray.from_arrays(child_arrays, fields=fields)

    if (pa.types.is_boolean(ty) or pa.types.is_integer(ty)
            or pa.types.is_floating(ty)):
        values = npst.arrays(ty.to_pandas_dtype(), shape=(size, ))
        np_arr = draw(values)
        if pa.types.is_floating(ty):
            # Workaround ARROW-4952: no easy way to assert array equality
            # in a NaN-tolerant way.
            np_arr[np.isnan(np_arr)] = -42.0
        return pa.array(np_arr, type=ty)

    if pa.types.is_null(ty):
        value = st.none()
    elif pa.types.is_time(ty):
        value = st.times()
    elif pa.types.is_date(ty):
        value = st.dates()
    elif pa.types.is_timestamp(ty):
        min_int64 = -(2**63)
        max_int64 = 2**63 - 1
        min_datetime = datetime.datetime.fromtimestamp(min_int64 / 10**9)
        max_datetime = datetime.datetime.fromtimestamp(max_int64 / 10**9)
        try:
            offset_hours = int(ty.tz)
            tz = pytz.FixedOffset(offset_hours * 60)
        except ValueError:
            tz = pytz.timezone(ty.tz)
        value = st.datetimes(timezones=st.just(tz),
                             min_value=min_datetime,
                             max_value=max_datetime)
    elif pa.types.is_duration(ty):
        value = st.timedeltas()
    elif pa.types.is_binary(ty) or pa.types.is_large_binary(ty):
        value = st.binary()
    elif pa.types.is_string(ty) or pa.types.is_large_string(ty):
        value = st.text()
    elif pa.types.is_decimal(ty):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    else:
        raise NotImplementedError(ty)

    values = st.lists(value, min_size=size, max_size=size)
    return pa.array(draw(values), type=ty)
Code example #26
File: test_specification.py Project: equinor/ecl3
def makedate(dt):
    """
    The STARTDAT date format does not use seconds, but instead embeds that
    information in microseconds; Python's datetime does not support second
    wrap-around. Generate a STARTDAT array from a datetime.
    """
    day = dt.day
    month = dt.month
    year = dt.year
    hour = dt.hour
    minute = dt.minute
    microsecond = dt.second * 1000000 + dt.microsecond
    return dt, [day, month, year, hour, minute, microsecond]


@given(st.builds(makedate, st.datetimes()))
def test_valid_startdates(startdates):
    expected, array = startdates
    kw = ('STARTDAT', array)
    s = summary.summary([kw])
    assert s.startdate == expected


def test_minimal_from_keywords():
    s = summary.summary(minimal_keywords)
    s.check_integrity()
    assert s.nlist == 2
    assert s.keywords == ['WOPR', 'WOPT']
    assert s.wgnames == ['W1', 'W2']
    assert s.units == ['SM3/DAY', 'SM3']
    assert s.startdate == datetime.datetime(
Code example #27
class TestTransform:
    @given(st.lists(st.floats()), st.datetimes(), available_freqs(), st.text())
    def test_list_as_input(
        self,
        input_list: pd.Series,
        start: datetime,
        freq: pd.Timedelta,
        output_name: str,
    ):
        time_series_preparation = TimeSeriesPreparation(
            start=start, freq=freq, output_name=output_name
        )
        computed_time_series = time_series_preparation.transform(input_list)
        expected_series = pandas_series_with_period_index(input_list, start, freq=freq)
        expected_time_series = pd.DataFrame({output_name: expected_series})
        assert_frame_equal(computed_time_series, expected_time_series)

    @given(
        arrays(shape=st.integers(0, 1000), dtype=float),
        st.datetimes(),
        available_freqs(),
        st.text(),
    )
    def test_array_as_input(
        self,
        input_array: np.ndarray,
        start: datetime,
        freq: pd.Timedelta,
        output_name: str,
    ):
        time_series_preparation = TimeSeriesPreparation(
            start=start, freq=freq, output_name=output_name
        )
        computed_time_series = time_series_preparation.transform(input_array)
        expected_series = pandas_series_with_period_index(input_array, start, freq=freq)
        expected_time_series = pd.DataFrame({output_name: expected_series})
        assert_frame_equal(computed_time_series, expected_time_series)

    @given(series_with_period_index(), st.datetimes(), available_freqs(), st.text())
    def test_period_index_as_input(
        self,
        period_index_series: pd.Series,
        start: datetime,
        freq: pd.Timedelta,
        output_name: str,
    ):
        time_series_preparation = TimeSeriesPreparation(
            start=start, freq=freq, output_name=output_name
        )
        computed_time_series = time_series_preparation.transform(period_index_series)
        expected_time_series = pd.DataFrame({output_name: period_index_series})
        assert_frame_equal(computed_time_series, expected_time_series)

    @given(series_with_datetime_index(), st.datetimes(), available_freqs(), st.text())
    def test_datetime_index_as_input(
        self,
        datetime_index_series: pd.Series,
        start: datetime,
        freq: pd.Timedelta,
        output_name: str,
    ):
        time_series_preparation = TimeSeriesPreparation(
            start=start, freq=freq, output_name=output_name
        )
        computed_time_series = time_series_preparation.transform(datetime_index_series)
        expected_series = datetime_index_series_to_period_index_series(
            datetime_index_series, freq=freq
        )
        expected_time_series = pd.DataFrame({output_name: expected_series})
        assert_frame_equal(computed_time_series, expected_time_series)

    @given(series_with_timedelta_index(), st.datetimes(), available_freqs(), st.text())
    def test_timedelta_index_as_input(
        self,
        timedelta_index_series: pd.Series,
        start: datetime,
        freq: pd.Timedelta,
        output_name: str,
    ):
        time_series_preparation = TimeSeriesPreparation(
            start=start, freq=freq, output_name=output_name
        )
        computed_time_series = time_series_preparation.transform(timedelta_index_series)
        expected_series = timedelta_index_series_to_period_index_series(
            timedelta_index_series, freq=freq
        )
        expected_time_series = pd.DataFrame({output_name: expected_series})
        assert_frame_equal(computed_time_series, expected_time_series)
Code example #28
def test_can_generate_non_utc():
    find_any(
        st.datetimes(
            timezones=st.timezones()).filter(lambda d: d.tzinfo.key != "UTC"))
Code example #29
def test_datetime_bounds_must_be_naive(name, val):
    with pytest.raises(InvalidArgument):
        datetimes(**{name: val}).validate()
Code example #30
        st.datetimes(timezones=timezones() | st.none()) |
        st.dates() |
        st.times(timezones=timezones() | st.none()) |
        st.timedeltas() |
        st.booleans() |
        st.none()
    )


hashable_primitives = (
    st.booleans() |
    st.integers() |
    st.floats(allow_nan=False) |
    st.text() |
    st.binary() |
    st.datetimes() |
    st.timedeltas()
)


def hashable_containers(primitives):
    def extend(base):
        return st.one_of(
            st.frozensets(base, max_size=50),
            st.lists(base, max_size=50).map(tuple),
        )
    return st.recursive(primitives, extend)


def containers(primitives):
    def extend(base):
Code example #31
def test_datetimes_stay_within_naive_bounds(data, lo, hi):
    if lo > hi:
        lo, hi = hi, lo
    out = data.draw(datetimes(lo, hi, timezones=timezones()))
    assert lo <= out.replace(tzinfo=None) <= hi
Code example #32
from pandas.compat import is_platform_windows
from pandas.compat.numpy import np_array_datetime64_compat

import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.indexes.datetimes import date_range

import pandas.io.date_converters as conv

# constant
_DEFAULT_DATETIME = datetime(1, 1, 1)

# Strategy for hypothesis
if is_platform_windows():
    date_strategy = st.datetimes(min_value=datetime(1900, 1, 1))
else:
    date_strategy = st.datetimes()


def test_separator_date_conflict(all_parsers):
    # Regression test for gh-4678
    #
    # Make sure thousands separator and
    # date parsing do not conflict.
    parser = all_parsers
    data = "06-02-2013;13:00;1-000.215"
    expected = DataFrame([[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
                         columns=["Date", 2])

    df = parser.read_csv(
Code example #33
def test_can_generate_non_naive_datetime():
    assert (
        minimal(datetimes(timezones=timezones()), lambda d: d.tzinfo).tzinfo == pytz.UTC
    )
Code example #34
def test_utc_is_minimal():
    assert pytz.UTC is minimal(timezones())


def test_can_generate_non_naive_time():
    assert minimal(times(timezones=timezones()),
                   lambda d: d.tzinfo).tzinfo == pytz.UTC


def test_can_generate_non_naive_datetime():
    assert (minimal(datetimes(timezones=timezones()),
                    lambda d: d.tzinfo).tzinfo == pytz.UTC)


@given(datetimes(timezones=timezones()))
def test_timezone_aware_datetimes_are_timezone_aware(dt):
    assert dt.tzinfo is not None


@given(sampled_from(["min_value", "max_value"]),
       datetimes(timezones=timezones()))
def test_datetime_bounds_must_be_naive(name, val):
    with pytest.raises(InvalidArgument):
        datetimes(**{name: val}).validate()


def test_underflow_in_simplify():
    # we shouldn't trigger a pytz bug when we're simplifying
    minimal(
        datetimes(max_value=dt.datetime.min + dt.timedelta(days=3),
Code example #35
def test_utc_is_minimal():
    assert pytz.UTC is minimal(timezones())


def test_can_generate_non_naive_time():
    assert minimal(times(timezones=timezones()), lambda d: d.tzinfo).tzinfo == pytz.UTC


def test_can_generate_non_naive_datetime():
    assert (
        minimal(datetimes(timezones=timezones()), lambda d: d.tzinfo).tzinfo == pytz.UTC
    )


@given(datetimes(timezones=timezones()))
def test_timezone_aware_datetimes_are_timezone_aware(dt):
    assert dt.tzinfo is not None


@given(sampled_from(["min_value", "max_value"]), datetimes(timezones=timezones()))
def test_datetime_bounds_must_be_naive(name, val):
    with pytest.raises(InvalidArgument):
        datetimes(**{name: val}).validate()


def test_underflow_in_simplify():
    # we shouldn't trigger a pytz bug when we're simplifying
    minimal(
        datetimes(
            max_value=dt.datetime.min + dt.timedelta(days=3), timezones=timezones()
Code example #36
def test_timezones_arg_to_datetimes_must_be_search_strategy():
    with pytest.raises(InvalidArgument):
        datetimes(timezones=pytz.all_timezones).validate()
    with pytest.raises(InvalidArgument):
        tz = [pytz.timezone(t) for t in pytz.all_timezones]
        datetimes(timezones=tz).validate()
Code example #37
    assert minimal(timedeltas(max_value=dt.timedelta(days=-10))).days == -10


@given(timedeltas())
def test_single_timedelta(val):
    assert find_any(timedeltas(val, val)) is val


def test_simplifies_towards_millenium():
    d = minimal(datetimes())
    assert d.year == 2000
    assert d.month == d.day == 1
    assert d.hour == d.minute == d.second == d.microsecond == 0


@given(datetimes())
def test_default_datetimes_are_naive(dt):
    assert dt.tzinfo is None


def test_bordering_on_a_leap_year():
    x = minimal(
        datetimes(
            dt.datetime.min.replace(year=2003), dt.datetime.max.replace(year=2005)
        ),
        lambda x: x.month == 2 and x.day == 29,
        timeout_after=60,
    )
    assert x.year == 2004

Code example #38
File: test_eve.py Project: DanRyanIrish/sunpy
@pytest.mark.remote_data
@pytest.mark.parametrize(
    'query',
    [(a.Time('2012/10/4', '2012/10/6') & a.Instrument('eve') & a.Level(0))])
def test_fido(query):
    qr = Fido.search(query)
    client = qr.get_response(0).client
    assert isinstance(qr, UnifiedResponse)
    assert isinstance(client, eve.EVEClient)
    response = Fido.fetch(qr)
    assert len(response) == qr._numfile


@pytest.mark.remote_data
@given(time_attr(time=datetimes(
    max_value=datetime.datetime(datetime.datetime.utcnow().year, 1, 1, 0, 0),
    min_value=datetime.datetime(2010, 1, 1, 0, 0),
)))
@settings(max_examples=2, timeout=240)
def test_levels(time):
    """
    Test the correct handling of level 0 / 1.
    The default should be level 1 from VSO, level 0 comes from EVEClient.
    """
    eve_a = a.Instrument('EVE')
    qr = Fido.search(time, eve_a)
    client = qr.get_response(0).client
    assert isinstance(client, VSOClient)

    qr = Fido.search(time, eve_a, a.Level(0))
    client = qr.get_response(0).client
    assert isinstance(client, eve.EVEClient)
Code example #39
def tag_triples_pair(draw, dts=st.datetimes(), bools=st.booleans()):
    a = ("test_tag", draw(dts), draw(bools))
    b = ("test_tag", draw(dts), draw(bools))
    return (a, b)
Code example #40
File: test_35_indexes.py Project: nilp0inter/binlog
    cmp_python = cmp(python_value1, python_value2)
    cmp_db = cmp(db_value1, db_value2)

    return cmp_python == cmp_db


@given(python_value1=st.text(min_size=0, max_size=511),
       python_value2=st.text(min_size=0, max_size=511))
def test_TextIndex_is_sortable(python_value1, python_value2):
    assert _test_index_is_sortable(TextIndex.K,
                                   python_value1,
                                   python_value2)


@given(python_value1=st.integers(min_value=0, max_value=2**64-1),
       python_value2=st.integers(min_value=0, max_value=2**64-1))
@example(python_value1=1, python_value2=256)
def test_NumericIndex_is_sortable(python_value1, python_value2):
    assert _test_index_is_sortable(NumericIndex.K,
                                   python_value1,
                                   python_value2)


@given(python_value1=st.datetimes(min_value=datetime.fromtimestamp(0)),
       python_value2=st.datetimes(min_value=datetime.fromtimestamp(0)))
def test_DatetimeIndex_is_sortable(python_value1, python_value2):
    assert _test_index_is_sortable(DatetimeIndex.K,
                                   python_value1,
                                   python_value2)
Code example #41
@pytest.mark.parametrize(
    "serializer,strategy",
    [(NumericSerializer, st.integers(
                             min_value=0,
                             max_value=2**64-1)),
     (TextSerializer, st.text(
                          min_size=0,
                          max_size=511)),
     (ObjectSerializer, st.dictionaries(
                            st.text(),
                            st.text())),
     (NullListSerializer, st.text(
                              min_size=1,
                              alphabet=ascii_letters + '.')),
     (DatetimeSerializer, st.datetimes(
                              min_value=datetime.fromtimestamp(0)))])
@given(st.data())
def test_serializers_conversion(serializer, strategy, data):
    python_value = expected = data.draw(strategy)
    current = serializer.python_value(
        memoryview(serializer.db_value(python_value)))

    assert current == expected


def test_nulllistserializer_invalid_values():
    with pytest.raises(ValueError):
        NullListSerializer.db_value('')

    with pytest.raises(ValueError):
        NullListSerializer.db_value('test\0')
Code example #42
    print(schema._declared_fields)

    output = schema.load(serialize_df(sample_df, orient="records"))

    assert_frame_equal(output, sample_df)


@hypothesis.given(test_df=data_frames(
    columns=[
        column("int", dtype=int),
        column("float", dtype=float),
        column("bool", dtype=bool),
        column("chars", elements=st.characters()),
        column(
            "datetime",
            elements=st.datetimes(min_value=pd.Timestamp.min,
                                  max_value=pd.Timestamp.max),
            dtype="datetime64[s]",
        ),
    ],
    # records serialization format does not record indices, so we always
    # set them to an integer index.
    index=(indexes(elements=st.integers(
        min_value=np.iinfo(np.int64).min,
        max_value=np.iinfo(np.int64).max,
    ))),
))
def test_records_schema_hypothesis(test_df):

    if not len(test_df.index):
        # ignore empty datasets as dtype is impossible to infer from serialized
        return
Code example #43
    YearEnd, YearBegin, BYearEnd, BYearBegin,
)

# ----------------------------------------------------------------
# Helpers for generating random data

with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    min_dt = pd.Timestamp(1900, 1, 1).to_pydatetime()
    max_dt = pd.Timestamp(1900, 1, 1).to_pydatetime()

gen_date_range = st.builds(
    pd.date_range,
    start=st.datetimes(
        # TODO: Choose the min/max values more systematically
        min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
        max_value=pd.Timestamp(2100, 1, 1).to_pydatetime()
    ),
    periods=st.integers(min_value=2, max_value=100),
    freq=st.sampled_from('Y Q M D H T s ms us ns'.split()),
    tz=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)

gen_random_datetime = st.datetimes(
    min_value=min_dt,
    max_value=max_dt,
    timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones())
)

# The strategy for each type is registered in conftest.py, as they don't carry
# enough runtime information (e.g. type hints) to infer how to build them.
Code example #44
from hypothesis import given, assume
from hypothesis import strategies as st

from dateutil import tz
from dateutil.parser import isoparse

import pytest

# Strategies
TIME_ZONE_STRATEGY = st.sampled_from([None, tz.tzutc()] +
    [tz.gettz(zname) for zname in ('US/Eastern', 'US/Pacific',
                                   'Australia/Sydney', 'Europe/London')])
ASCII_STRATEGY = st.characters(max_codepoint=127)


@pytest.mark.isoparser
@given(dt=st.datetimes(timezones=TIME_ZONE_STRATEGY), sep=ASCII_STRATEGY)
def test_timespec_auto(dt, sep):
    if dt.tzinfo is not None:
        # Assume offset has no sub-second components
        assume(dt.utcoffset().total_seconds() % 60 == 0)

    sep = str(sep)          # Python 2.7 requires bytes
    dtstr = dt.isoformat(sep=sep)
    dt_rt = isoparse(dtstr)

    assert dt_rt == dt
Code example #45
File: models.py Project: doismellburning/hypothesis
def get_datetime_strat():
    if getattr(django_settings, 'USE_TZ', False):
        return st.datetimes(timezones=timezones())
    return st.datetimes()
Code example #46
File: types.py Project: sunito/hypothesis
            'register_type_strategy' % (empty or thing,))
    return st.one_of(strategies)


_global_type_lookup = {
    # Types with core Hypothesis strategies
    type(None): st.none(),
    bool: st.booleans(),
    int: st.integers(),
    float: st.floats(),
    complex: st.complex_numbers(),
    fractions.Fraction: st.fractions(),
    decimal.Decimal: st.decimals(),
    text_type: st.text(),
    bytes: st.binary(),
    datetime.datetime: st.datetimes(),
    datetime.date: st.dates(),
    datetime.time: st.times(),
    datetime.timedelta: st.timedeltas(),
    uuid.UUID: st.uuids(),
    tuple: st.builds(tuple),
    list: st.builds(list),
    set: st.builds(set),
    frozenset: st.builds(frozenset),
    dict: st.builds(dict),
    # Built-in types
    type: st.sampled_from([type(None), bool, int, str, list, set, dict]),
    type(Ellipsis): st.just(Ellipsis),
    type(NotImplemented): st.just(NotImplemented),
    bytearray: st.binary().map(bytearray),
    memoryview: st.binary().map(memoryview),
Code example #47
from math import inf, nan

import pandas as pd

import numpy as np
import pytest
from hypothesis import given
from hypothesis.extra.pytz import timezones
from hypothesis.strategies import datetimes, floats

from pysoleng.solar_geom import calculate_hour_angle_degrees

# Create time zone-aware datetimes for use in testing
aware_datetimes = datetimes(
    min_value=pd.Timestamp.min + pd.DateOffset(2),
    max_value=pd.Timestamp.max - pd.DateOffset(2),
    timezones=timezones(),
)


@pytest.mark.solar_geom
@given(
    aware_datetimes,
    floats(min_value=0, max_value=360, allow_nan=False, allow_infinity=False),
)
def test_calculate_hour_angle(dt, longitude):
    """Functional test to ensure the calculate_hour_angle() method
    runs properly given valid arguments."""
    assert isinstance(
        calculate_hour_angle_degrees(
            local_standard_time=dt, longitude_degrees=longitude
Code example #48
    YearEnd,
)

# ----------------------------------------------------------------
# Helpers for generating random data

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    min_dt = Timestamp(1900, 1, 1).to_pydatetime()
    max_dt = Timestamp(1900, 1, 1).to_pydatetime()

gen_date_range = st.builds(
    pd.date_range,
    start=st.datetimes(
        # TODO: Choose the min/max values more systematically
        min_value=Timestamp(1900, 1, 1).to_pydatetime(),
        max_value=Timestamp(2100, 1, 1).to_pydatetime(),
    ),
    periods=st.integers(min_value=2, max_value=100),
    freq=st.sampled_from("Y Q M D H T s ms us ns".split()),
    tz=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)

gen_random_datetime = st.datetimes(
    min_value=min_dt,
    max_value=max_dt,
    timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)

# The strategy for each type is registered in conftest.py, as they don't carry
# enough runtime information (e.g. type hints) to infer how to build them.
Code example #49
        return value

    return _strategy()


def primitives():
    return (st.integers() | st.floats(allow_nan=False) | st.text()
            | st.binary() | st.datetimes(timezones=timezones() | st.none())
            | st.dates() | st.times(timezones=timezones() | st.none())
            | st.timedeltas() | st.booleans() | st.none())


hashable_primitives = (st.booleans() | st.integers()
                       | st.floats(allow_nan=False) | st.text() | st.binary()
                       | st.datetimes() | st.timedeltas())


def identity(x):
    return x


def hashables():
    def extend(base):
        return base.flatmap(lambda strat: st.tuples(
            strat, st.sampled_from([
                st.tuples,
                st.frozensets,
            ]))).map(lambda strat__extend: strat__extend[1](strat__extend[0]))

    return st.recursive(hashable_primitives, extend)
Code example #50
def primitives():
    return (st.integers() | st.floats(allow_nan=False) | st.text()
            | st.binary() | st.datetimes(timezones=timezones() | st.none())
            | st.dates() | st.times(timezones=timezones() | st.none())
            | st.timedeltas() | st.booleans() | st.none())
Code example #51
def _for_datetime(field):
    if getattr(django.conf.settings, "USE_TZ", False):
        return st.datetimes(timezones=timezones())
    return st.datetimes()
Code example #52
    assert minimal(timedeltas(max_value=dt.timedelta(days=-10))).days == -10


@given(timedeltas())
def test_single_timedelta(val):
    assert find_any(timedeltas(val, val)) is val


def test_simplifies_towards_millenium():
    d = minimal(datetimes())
    assert d.year == 2000
    assert d.month == d.day == 1
    assert d.hour == d.minute == d.second == d.microsecond == 0


@given(datetimes())
def test_default_datetimes_are_naive(dt):
    assert dt.tzinfo is None


def test_bordering_on_a_leap_year():
    x = minimal(
        datetimes(dt.datetime.min.replace(year=2003),
                  dt.datetime.max.replace(year=2005)),
        lambda x: x.month == 2 and x.day == 29,
        timeout_after=60,
    )
    assert x.year == 2004


def test_can_find_after_the_year_2000():
Code example #53
File: strategies.py Project: emkornfield/arrow
def arrays(draw, type, size=None):
    if isinstance(type, st.SearchStrategy):
        type = draw(type)
    elif not isinstance(type, pa.DataType):
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    shape = (size,)

    if pa.types.is_list(type):
        offsets = draw(npst.arrays(np.uint8(), shape=shape)).cumsum() // 20
        offsets = np.insert(offsets, 0, 0, axis=0)  # prepend with zero
        values = draw(arrays(type.value_type, size=int(offsets.sum())))
        return pa.ListArray.from_arrays(offsets, values)

    if pa.types.is_struct(type):
        h.assume(len(type) > 0)
        names, child_arrays = [], []
        for field in type:
            names.append(field.name)
            child_arrays.append(draw(arrays(field.type, size=size)))
        # fields' metadata are lost here, because from_arrays doesn't accept
        # a fields argument, only names
        return pa.StructArray.from_arrays(child_arrays, names=names)

    if (pa.types.is_boolean(type) or pa.types.is_integer(type) or
            pa.types.is_floating(type)):
        values = npst.arrays(type.to_pandas_dtype(), shape=(size,))
        np_arr = draw(values)
        if pa.types.is_floating(type):
            # Workaround ARROW-4952: no easy way to assert array equality
            # in a NaN-tolerant way.
            np_arr[np.isnan(np_arr)] = -42.0
        return pa.array(np_arr, type=type)

    if pa.types.is_null(type):
        value = st.none()
    elif pa.types.is_time(type):
        value = st.times()
    elif pa.types.is_date(type):
        value = st.dates()
    elif pa.types.is_timestamp(type):
        tz = pytz.timezone(type.tz) if type.tz is not None else None
        value = st.datetimes(timezones=st.just(tz))
    elif pa.types.is_binary(type):
        value = st.binary()
    elif pa.types.is_string(type):
        value = st.text()
    elif pa.types.is_decimal(type):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    else:
        raise NotImplementedError(type)

    values = st.lists(value, min_size=size, max_size=size)
    return pa.array(draw(values), type=type)
Code example #54
def test_simplifies_towards_millenium():
    d = minimal(datetimes())
    assert d.year == 2000
    assert d.month == d.day == 1
    assert d.hour == d.minute == d.second == d.microsecond == 0
Code example #55
File: test_parse_dates.py Project: pydata/pandas
from pandas.compat.numpy import np_array_datetime64_compat

import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex
from pandas.core.indexes.datetimes import date_range
import pandas.util.testing as tm

import pandas.io.date_converters as conv
import pandas.io.parsers as parsers

# constant
_DEFAULT_DATETIME = datetime(1, 1, 1)

# Strategy for hypothesis
if is_platform_windows():
    date_strategy = st.datetimes(min_value=datetime(1900, 1, 1))
else:
    date_strategy = st.datetimes()


def test_separator_date_conflict(all_parsers):
    # Regression test for gh-4678
    #
    # Make sure thousands separator and
    # date parsing do not conflict.
    parser = all_parsers
    data = "06-02-2013;13:00;1-000.215"
    expected = DataFrame([[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
                         columns=["Date", 2])

    df = parser.read_csv(StringIO(data), sep=";", thousands="-",
Code example #56
def test_utc_is_minimal():
    assert pytz.UTC is minimal(timezones())


def test_can_generate_non_naive_time():
    assert minimal(times(timezones=timezones()), lambda d: d.tzinfo).tzinfo == pytz.UTC


def test_can_generate_non_naive_datetime():
    assert (
        minimal(datetimes(timezones=timezones()), lambda d: d.tzinfo).tzinfo == pytz.UTC
    )


@given(datetimes(timezones=timezones()))
def test_timezone_aware_datetimes_are_timezone_aware(dt):
    assert dt.tzinfo is not None


@given(sampled_from(["min_value", "max_value"]), datetimes(timezones=timezones()))
def test_datetime_bounds_must_be_naive(name, val):
    with pytest.raises(InvalidArgument):
        datetimes(**{name: val}).validate()


def test_underflow_in_simplify():
    # we shouldn't trigger a pytz bug when we're simplifying
    minimal(
        datetimes(
            max_value=dt.datetime.min + dt.timedelta(days=3), timezones=timezones()
Code example #57
def test_can_generate_non_naive_datetime():
    assert minimal(datetimes(timezones=timezones()),
                   lambda d: d.tzinfo).tzinfo == tz.UTC
Code example #58
def test_can_trigger_error_in_draw_near_boundary(bound):
    assert_can_trigger_event(
        datetimes(**bound, timezones=timezones()),
        lambda event: "Failed to draw a datetime" in event,
    )
Code example #59
def test_timezones_arg_to_datetimes_must_be_search_strategy():
    all_timezones = zoneinfo.get_zonefile_instance().zones
    with pytest.raises(InvalidArgument):
        datetimes(timezones=all_timezones).validate()
Code example #60
File: test_citytime.py Project: tweyter/CityTime
def test_uninitialized():
    ct = CityTime(time=None)
    assert str(ct) == 'CityTime object not set yet.'


def test_uninitialized__repr__():
    assert CityTime().__repr__() == "CityTime object not set yet."


def test_using_unset():
    ct = CityTime()
    with raises(ValueError):
        ct.astimezone('utc')


@given(datetimes(timezones=t_zones()))
def test_set_t_zone(dt):
    ct = CityTime()
    ct.set(dt, str(dt.tzinfo))
    assert ct.is_set()
    assert ct.timezone() == str(dt.tzinfo)


@given(datetimes(timezones=t_zones()))
def test_set_datetime(dt):
    ct = CityTime(dt, str(dt.tzinfo))
    assert ct.is_set()
    assert ct.utc().tzinfo == pytz.timezone('UTC')
    assert ct.utc() == dt