Example #1
def test_false_alarm_equivalence(method, normalization, use_errs):
    # Note: the PSD normalization is not equivalent to the others, in that it
    # depends on the absolute errors rather than relative errors. Because the
    # scaling contributes to the distribution, it cannot be converted directly
    # from any of the three normalized versions.
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy = make_data()
    if not use_errs:
        dy = None
    fmax = 5

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)
    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    # Compute the equivalent Z values in the standard normalization
    # and check that the FAP is consistent
    Z_std = convert_normalization(Z, len(t),
                                  from_normalization=normalization,
                                  to_normalization='standard',
                                  chi2_ref=compute_chi2_ref(y, dy))
    ls = LombScargle(t, y, dy, normalization='standard')
    fap_std = ls.false_alarm_probability(Z_std, maximum_frequency=fmax,
                                         method=method, method_kwds=kwds)

    assert_allclose(fap, fap_std, rtol=0.1)
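
All of the examples below rely on module-level scaffolding that is not shown in this listing: the HAS_SCIPY flag, the METHOD_KWDS dictionary, pytest fixtures such as data and null_data, and a make_data helper (the unit-aware variants, e.g. Examples #5, #11 and #13, use versions that also accept units= and return fmax). The following is a minimal sketch of plausible definitions, for orientation only; the real test module may define them differently.

import numpy as np
import pytest
from numpy.testing import assert_allclose

from astropy import units as u
from astropy.timeseries import LombScargle  # older releases: from astropy.stats import LombScargle
from astropy.tests.helper import assert_quantity_allclose

try:
    import scipy  # noqa: F401
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False

# Keywords passed through to individual false-alarm methods; only the
# bootstrap method needs them here (kept small so the tests stay fast).
METHOD_KWDS = dict(bootstrap={'n_bootstraps': 20, 'random_seed': 42})


def make_data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
    # Irregularly sampled sinusoid plus heteroscedastic Gaussian noise.
    rng = np.random.RandomState(rseed)
    t = 5 * period * rng.rand(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    dy = dy * (0.5 + rng.rand(N))
    y += dy * rng.randn(N)
    return t, y, dy


@pytest.fixture
def data():
    # Hypothetical fixture; the real one may use a different size and seed.
    return make_data(rseed=42)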
Example #2
def test_nterms_methods(method, center_data, fit_mean, errors,
                        nterms, normalization, data):
    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)

    if errors == 'none':
        dy = None
    elif errors == 'partial':
        dy = dy[0]
    elif errors == 'full':
        pass
    else:
        raise ValueError("Unrecognized error type: '{0}'".format(errors))

    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, nterms=nterms,
                     normalization=normalization)

    if nterms == 0 and not fit_mean:
        with pytest.raises(ValueError) as err:
            ls.power(frequency, method=method)
        assert 'nterms' in str(err.value) and 'bias' in str(err.value)
    else:
        P_expected = ls.power(frequency)

        # don't use fast fft approximations here
        kwds = {}
        if 'fast' in method:
            kwds['method_kwds'] = dict(use_fft=False)
        P_method = ls.power(frequency, method=method, **kwds)

        assert_allclose(P_expected, P_method, rtol=1E-7, atol=1E-25)
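
Outside the test, a multi-term (nterms > 1) periodogram is requested the same way; a short illustrative sketch, reusing the make_data helper sketched after Example #1. The explicit method and use_fft flag are optional, since 'auto' would pick a chi2-based method for nterms > 1.

t, y, dy = make_data()
frequency = 0.8 + 0.01 * np.arange(40)

ls = LombScargle(t, y, dy, nterms=3)   # 3-harmonic model instead of a single sinusoid
power = ls.power(frequency, method='fastchi2',
                 method_kwds=dict(use_fft=False))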
Example #3
def test_autopower(data):
    t, y, dy = data
    ls = LombScargle(t, y, dy)
    kwargs = dict(samples_per_peak=6, nyquist_factor=2,
                  minimum_frequency=2, maximum_frequency=None)
    freq1 = ls.autofrequency(**kwargs)
    power1 = ls.power(freq1)
    freq2, power2 = ls.autopower(**kwargs)

    assert_allclose(freq1, freq2)
    assert_allclose(power1, power2)
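
Outside of pytest, the equivalence checked here is just the documented convenience: autopower combines autofrequency and power in one call. An illustrative sketch, reusing the make_data helper sketched after Example #1:

t, y, dy = make_data()
ls = LombScargle(t, y, dy)

# One-step form...
frequency, power = ls.autopower(samples_per_peak=6, nyquist_factor=2,
                                minimum_frequency=2)

# ...is equivalent to the two-step form.
frequency2 = ls.autofrequency(samples_per_peak=6, nyquist_factor=2,
                              minimum_frequency=2)
power2 = ls.power(frequency2)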
Example #4
def test_model_units_match(data, t_unit, frequency_unit, y_unit):
    t, y, dy = data
    t_fit = t[:5]
    frequency = 1.0

    t = t * t_unit
    t_fit = t_fit * t_unit
    y = y * y_unit
    dy = dy * y_unit
    frequency = frequency * frequency_unit

    ls = LombScargle(t, y, dy)
    y_fit = ls.model(t_fit, frequency)
    assert y_fit.unit == y_unit
Example #5
def test_false_alarm_equivalence(method, normalization, use_errs, units):
    # Note: the PSD normalization is not equivalent to the others, in that it
    # depends on the absolute errors rather than relative errors. Because the
    # scaling contributes to the distribution, it cannot be converted directly
    # from any of the three normalized versions.
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)
    if not use_errs:
        dy = None

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)
    fap = ls.false_alarm_probability(Z,
                                     maximum_frequency=fmax,
                                     method=method,
                                     method_kwds=kwds)

    # Compute the equivalent Z values in the standard normalization
    # and check that the FAP is consistent
    Z_std = convert_normalization(Z,
                                  len(t),
                                  from_normalization=normalization,
                                  to_normalization='standard',
                                  chi2_ref=compute_chi2_ref(y, dy))
    ls = LombScargle(t, y, dy, normalization='standard')
    fap_std = ls.false_alarm_probability(Z_std,
                                         maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=kwds)

    assert_allclose(fap, fap_std, rtol=0.1)
Example #6
def test_model(fit_mean, with_units, freq):
    rand = np.random.RandomState(0)
    t = 10 * rand.rand(40)
    params = 10 * rand.rand(3)

    y = np.zeros_like(t)
    if fit_mean:
        y += params[0]
    y += params[1] * np.sin(2 * np.pi * freq * (t - params[2]))

    if with_units:
        t = t * u.day
        y = y * u.mag
        freq = freq / u.day

    ls = LombScargle(t, y, center_data=False, fit_mean=fit_mean)
    y_fit = ls.model(t, freq)
    assert_quantity_allclose(y_fit, y)
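
In ordinary use the model is usually evaluated at the frequency of the strongest periodogram peak rather than at a known input frequency. A short illustrative sketch, under the same assumptions as the scaffolding sketch after Example #1:

t, y, dy = make_data()
ls = LombScargle(t, y, dy)

frequency, power = ls.autopower()
best_frequency = frequency[np.argmax(power)]

t_fit = np.linspace(t.min(), t.max(), 1000)
y_fit = ls.model(t_fit, best_frequency)   # best-fit sinusoid (plus mean) at the peak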
Example #7
def test_false_alarm_smoketest(method, normalization):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy = make_data()
    fmax = 5

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)

    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    assert len(fap) == len(Z)
    if method != 'davies':
        assert np.all(fap <= 1)
        assert np.all(fap[:-1] >= fap[1:])  # FAP is non-increasing in Z
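
In practice the same call is most often applied to a single value, the height of the highest peak; a sketch under the same assumptions as the scaffolding after Example #1:

t, y, dy = make_data()
ls = LombScargle(t, y, dy)
frequency, power = ls.autopower(maximum_frequency=5)

# FAP of the highest observed peak; maximum_frequency should describe the
# frequency range that was actually searched.
fap_peak = ls.false_alarm_probability(power.max(), maximum_frequency=5)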
Example #8
def test_inverse_bootstrap(null_data, normalization, use_errs, fmax=5):
    t, y, dy = null_data
    if not use_errs:
        dy = None

    fap = np.linspace(0, 1, 10)
    method = 'bootstrap'
    method_kwds = METHOD_KWDS['bootstrap']

    ls = LombScargle(t, y, dy, normalization=normalization)

    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method, method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)

    # atol = 1 / n_bootstraps
    assert_allclose(fap, fap_out, atol=0.05)
Example #9
def test_inverses(method, normalization, use_errs, N, T=5, fmax=5):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    t, y, dy = make_data(N, rseed=543)
    if not use_errs:
        dy = None
    method_kwds = METHOD_KWDS.get(method, None)

    fap = np.logspace(-10, 0, 10)

    ls = LombScargle(t, y, dy, normalization=normalization)
    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method,
                             method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)
    assert_allclose(fap, fap_out)
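
false_alarm_level is the inverse of false_alarm_probability: it maps a target false alarm probability to the corresponding power threshold, which is exactly the round trip this test checks. An illustrative one-liner, same assumptions as above:

t, y, dy = make_data()
ls = LombScargle(t, y, dy)

# Power a peak must exceed for a 1% false alarm probability over the searched range.
z_1percent = ls.false_alarm_level(0.01, maximum_frequency=5)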
Example #10
def test_all_methods(data, method, center_data, fit_mean,
                     errors, with_units, normalization):
    if method == 'scipy' and (fit_mean or errors != 'none'):
        return

    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)
    if with_units:
        t = t * u.day
        y = y * u.mag
        dy = dy * u.mag
        frequency = frequency / t.unit

    if errors == 'none':
        dy = None
    elif errors == 'partial':
        dy = dy[0]
    elif errors == 'full':
        pass
    else:
        raise ValueError("Unrecognized error type: '{0}'".format(errors))

    kwds = {}

    ls = LombScargle(t, y, dy, center_data=center_data, fit_mean=fit_mean,
                     normalization=normalization)
    P_expected = ls.power(frequency)

    # don't use the fft approximation here; we'll test this elsewhere
    if method in FAST_METHODS:
        kwds['method_kwds'] = dict(use_fft=False)
    P_method = ls.power(frequency, method=method, **kwds)

    if with_units:
        if normalization == 'psd' and errors == 'none':
            assert P_method.unit == y.unit ** 2
        else:
            assert P_method.unit == u.dimensionless_unscaled
    else:
        assert not hasattr(P_method, 'unit')

    assert_quantity_allclose(P_expected, P_method)
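
For everyday use the method argument can be left at its default ('auto'), which selects the fastest applicable implementation; the explicit forms compared in this test look like the following sketch (use_fft is only meaningful for the 'fast' and 'fastchi2' methods), again reusing the make_data helper sketched earlier:

t, y, dy = make_data()
frequency = 0.8 + 0.01 * np.arange(40)
ls = LombScargle(t, y, dy)

P_auto = ls.power(frequency)                           # method='auto' (default)
P_slow = ls.power(frequency, method='slow')            # exact O(N * n_freq) reference
P_fast = ls.power(frequency, method='fast',
                  method_kwds=dict(use_fft=False))     # 'fast' without the FFT approximation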
Example #11
def test_false_alarm_smoketest(method, normalization, units):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)

    fap = ls.false_alarm_probability(Z,
                                     maximum_frequency=fmax,
                                     method=method,
                                     method_kwds=kwds)

    assert len(fap) == len(Z)
    if method != 'davies':
        assert np.all(fap <= 1)
        assert np.all(fap[:-1] >= fap[1:])  # FAP is non-increasing in Z
Example #12
def test_inverses(method, normalization, use_errs, N, T=5, fmax=5):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    t, y, dy = make_data(N, rseed=543)
    if not use_errs:
        dy = None
    method_kwds = METHOD_KWDS.get(method, None)

    fap = np.logspace(-10, 0, 10)

    ls = LombScargle(t, y, dy, normalization=normalization)
    z = ls.false_alarm_level(fap,
                             maximum_frequency=fmax,
                             method=method,
                             method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z,
                                         maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)
    assert_allclose(fap, fap_out)
Example #13
def test_distribution(normalization, with_errors, units):
    t, y, dy, fmax = null_data(units=units)

    if not with_errors:
        dy = None

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    z = np.linspace(0, power.max(), 1000)

    # Test that pdf and cdf are consistent
    dz = z[1] - z[0]
    z_mid = z[:-1] + 0.5 * dz
    pdf = ls.distribution(z_mid)
    cdf = ls.distribution(z, cumulative=True)
    if isinstance(dz, u.Quantity):
        dz = dz.value
    assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)

    # psd normalization without specified errors produces bad results
    if not (normalization == 'psd' and not with_errors):
        # Test that observed power is distributed according to the theoretical pdf
        hist, bins = np.histogram(power, 30, density=True)
        midpoints = 0.5 * (bins[1:] + bins[:-1])
        pdf = ls.distribution(midpoints)
        assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])
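
The distribution method exercised here can also be used directly, e.g. to overlay the theoretical null distribution of peak heights on an observed periodogram; an illustrative sketch, same assumptions as before:

t, y, dy = make_data()
ls = LombScargle(t, y, dy, normalization='standard')
freq, power = ls.autopower(maximum_frequency=5)

z = np.linspace(0, power.max(), 100)
pdf = ls.distribution(z)                   # theoretical density under the null hypothesis
cdf = ls.distribution(z, cumulative=True)  # corresponding cumulative distribution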
Example #14
def test_inverse_bootstrap(null_data, normalization, use_errs, fmax=5):
    t, y, dy = null_data
    if not use_errs:
        dy = None

    fap = np.linspace(0, 1, 10)
    method = 'bootstrap'
    method_kwds = METHOD_KWDS['bootstrap']

    ls = LombScargle(t, y, dy, normalization=normalization)

    z = ls.false_alarm_level(fap,
                             maximum_frequency=fmax,
                             method=method,
                             method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z,
                                         maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)

    # atol = 1 / n_bootstraps
    assert_allclose(fap, fap_out, atol=0.05)
Example #15
def test_model_parameters(data, nterms, fit_mean, center_data,
                          errors, with_units):
    if nterms == 0 and not fit_mean:
        return

    t, y, dy = data
    frequency = 1.5
    if with_units:
        t = t * u.day
        y = y * u.mag
        dy = dy * u.mag
        frequency = frequency / t.unit

    if errors == 'none':
        dy = None
    elif errors == 'partial':
        dy = dy[0]
    elif errors == 'full':
        pass
    else:
        raise ValueError("Unrecognized error type: '{0}'".format(errors))

    ls = LombScargle(t, y, dy,
                     nterms=nterms,
                     fit_mean=fit_mean,
                     center_data=center_data)
    tfit = np.linspace(0, 20, 10)
    if with_units:
        tfit = tfit * u.day

    model = ls.model(tfit, frequency)
    params = ls.model_parameters(frequency)
    design = ls.design_matrix(frequency, t=tfit)
    offset = ls.offset()

    assert len(params) == int(fit_mean) + 2 * nterms

    assert_quantity_allclose(offset + design.dot(params), model)
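
The final assertion restates the linear-model identity that these three accessors expose: the model at times t equals the design matrix at those times applied to the parameter vector, plus a constant offset. Spelled out under the same assumptions as the earlier sketches:

t, y, dy = make_data()
ls = LombScargle(t, y, dy, nterms=2, fit_mean=True)
f = 1.5

theta = ls.model_parameters(f)        # length = int(fit_mean) + 2 * nterms
X = ls.design_matrix(f, t=t)          # shape (len(t), len(theta))
y_model = ls.offset() + X.dot(theta)  # identical to ls.model(t, f)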
Example #16
def test_distribution(null_data, normalization, with_errors, fmax=40):
    t, y, dy = null_data
    if not with_errors:
        dy = None

    N = len(t)
    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    z = np.linspace(0, power.max(), 1000)

    # Test that pdf and cdf are consistent
    dz = z[1] - z[0]
    z_mid = z[:-1] + 0.5 * dz
    pdf = ls.distribution(z_mid)
    cdf = ls.distribution(z, cumulative=True)
    assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)

    # psd normalization without specified errors produces bad results
    if not (normalization == 'psd' and not with_errors):
        # Test that observed power is distributed according to the theoretical pdf
        hist, bins = np.histogram(power, 30, density=True)
        midpoints = 0.5 * (bins[1:] + bins[:-1])
        pdf = ls.distribution(midpoints)
        assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])
Example #17
def test_fast_approximations(method, center_data, fit_mean,
                             errors, nterms, data):
    t, y, dy = data
    frequency = 0.8 + 0.01 * np.arange(40)

    if errors == 'none':
        dy = None
    elif errors == 'partial':
        dy = dy[0]
    elif errors == 'full':
        pass
    else:
        raise ValueError("Unrecognized error type: '{0}'".format(errors))

    ls = LombScargle(t, y, dy, center_data=center_data,
                     fit_mean=fit_mean, nterms=nterms,
                     normalization='standard')

    # use only standard normalization because we compare via absolute tolerance
    kwds = dict(method=method)

    if method == 'fast' and nterms != 1:
        with pytest.raises(ValueError) as err:
            ls.power(frequency, **kwds)
        assert 'nterms' in str(err.value)

    elif nterms == 0 and not fit_mean:
        with pytest.raises(ValueError) as err:
            ls.power(frequency, **kwds)
        assert 'nterms' in str(err.value) and 'bias' in str(err.value)

    else:
        P_fast = ls.power(frequency, **kwds)
        kwds['method_kwds'] = dict(use_fft=False)
        P_slow = ls.power(frequency, **kwds)

        assert_allclose(P_fast, P_slow, atol=0.008)
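
The quantity being bounded here is the error of the FFT-based extirpolation used by the 'fast' method; outside a test the comparison looks like this sketch (same assumptions as before):

t, y, dy = make_data()
frequency = 0.8 + 0.01 * np.arange(40)
ls = LombScargle(t, y, dy)

P_approx = ls.power(frequency, method='fast')                                 # FFT-based approximation
P_exact = ls.power(frequency, method='fast', method_kwds=dict(use_fft=False))
max_abs_err = np.max(np.abs(P_approx - P_exact))                              # small but nonzero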
Example #18
def test_absolute_times(data, timedelta):

    # Make sure that we handle absolute times correctly. We also check that
    # TimeDelta works properly when timedelta is True.

    # The example data uses relative times
    t, y, dy = data

    # FIXME: There seems to be a numerical stability issue in that if we run
    # the algorithm with the same values but offset in time, the results are
    # not simply offset by a fixed amount. To avoid this issue in this test, we
    # make sure the first time is also the smallest so that internally the
    # values of the relative time should be the same.
    t[0] = 0.

    # Add units
    t = t * u.day
    y = y * u.mag
    dy = dy * u.mag

    # We now construct a set of absolute times, keeping everything else the same
    start = Time('2019-05-04T12:34:56')
    trel = TimeDelta(t) if timedelta else t
    t = trel + start

    # We then set up two instances of LombScargle, one with absolute and one
    # with relative times.
    ls1 = LombScargle(t, y, dy)
    ls2 = LombScargle(trel, y, dy)

    kwargs = dict(samples_per_peak=6, nyquist_factor=2,
                  minimum_frequency=2 / u.day, maximum_frequency=None)

    freq1 = ls1.autofrequency(**kwargs)
    freq2 = ls2.autofrequency(**kwargs)
    assert_quantity_allclose(freq1, freq2)

    power1 = ls1.power(freq1)
    power2 = ls2.power(freq2)
    assert_quantity_allclose(power1, power2)

    freq1, power1 = ls1.autopower(**kwargs)
    freq2, power2 = ls2.autopower(**kwargs)
    assert_quantity_allclose(freq1, freq2)
    assert_quantity_allclose(power1, power2)

    model1 = ls1.model(t, 2 / u.day)
    model2 = ls2.model(trel, 2 / u.day)
    assert_quantity_allclose(model1, model2)

    # Check model validation

    with pytest.raises(TypeError) as exc:
        ls1.model(trel, 2 / u.day)
    assert exc.value.args[0] == ('t was provided as a relative time but the '
                                 'LombScargle class was initialized with '
                                 'absolute times.')

    with pytest.raises(TypeError) as exc:
        ls2.model(t, 2 / u.day)
    assert exc.value.args[0] == ('t was provided as an absolute time but the '
                                 'LombScargle class was initialized with '
                                 'relative times.')

    # Check design matrix

    design1 = ls1.design_matrix(2 / u.day, t=t)
    design2 = ls2.design_matrix(2 / u.day, t=trel)
    assert_quantity_allclose(design1, design2)

    # Check design matrix validation

    with pytest.raises(TypeError) as exc:
        ls1.design_matrix(2 / u.day, t=trel)
    assert exc.value.args[0] == ('t was provided as a relative time but the '
                                 'LombScargle class was initialized with '
                                 'absolute times.')

    with pytest.raises(TypeError) as exc:
        ls2.design_matrix(2 / u.day, t=t)
    assert exc.value.args[0] == ('t was provided as an absolute time but the '
                                 'LombScargle class was initialized with '
                                 'relative times.')
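
The pattern being validated: a LombScargle initialized with absolute times (a Time instance) must also be given absolute times in model and design_matrix, and one initialized with relative times must not. A minimal sketch of the absolute-time workflow, under the same assumptions as the earlier sketches:

from astropy.time import Time, TimeDelta

t_rel, y, dy = make_data()
t_abs = Time('2019-05-04T12:34:56') + TimeDelta(t_rel * u.day)

ls = LombScargle(t_abs, y * u.mag, dy * u.mag)
frequency, power = ls.autopower(samples_per_peak=6, nyquist_factor=2,
                                minimum_frequency=2 / u.day)

# model and design_matrix must then also be called with absolute times:
y_model = ls.model(t_abs, 2 / u.day)
design = ls.design_matrix(2 / u.day, t=t_abs)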