Example #1
def test_distribution(normalization, with_errors, units):
    t, y, dy, fmax = null_data(units=units)

    if not with_errors:
        dy = None

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    z = np.linspace(0, power.max(), 1000)

    # Test that pdf and cdf are consistent
    dz = z[1] - z[0]
    z_mid = z[:-1] + 0.5 * dz
    pdf = ls.distribution(z_mid)
    cdf = ls.distribution(z, cumulative=True)
    if isinstance(dz, u.Quantity):
        dz = dz.value
    assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)

    # psd normalization without specified errors produces bad results
    if not (normalization == 'psd' and not with_errors):
        # Test that observed power is distributed according to the theoretical pdf
        hist, bins = np.histogram(power, 30, density=True)
        midpoints = 0.5 * (bins[1:] + bins[:-1])
        pdf = ls.distribution(midpoints)
        assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])
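These excerpts come from a single test module for astropy's LombScargle periodogram, so the imports and data helpers (null_data, make_data, METHOD_KWDS, HAS_SCIPY) live at module level and are not repeated in each example. A minimal sketch of the imports the excerpts assume (recent astropy versions; the helpers themselves are only stubbed here):

import numpy as np
import pytest
from numpy.testing import assert_allclose

import astropy.units as u
from astropy.time import Time, TimeDelta
from astropy.timeseries import LombScargle
from astropy.tests.helper import assert_quantity_allclose

# The data helpers (null_data, make_data) and the METHOD_KWDS / HAS_SCIPY
# objects used below are part of the original test module; their definitions
# are not shown in these excerpts.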
Example #2
def test_false_alarm_equivalence(method, normalization, use_errs):
    # Note: the PSD normalization is not equivalent to the others, in that it
    # depends on the absolute errors rather than relative errors. Because the
    # scaling contributes to the distribution, it cannot be converted directly
    # from any of the three normalized versions.
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy = make_data()
    if not use_errs:
        dy = None
    fmax = 5

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)
    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    # Compute the equivalent Z values in the standard normalization
    # and check that the FAP is consistent
    Z_std = convert_normalization(Z, len(t),
                                  from_normalization=normalization,
                                  to_normalization='standard',
                                  chi2_ref=compute_chi2_ref(y, dy))
    ls = LombScargle(t, y, dy, normalization='standard')
    fap_std = ls.false_alarm_probability(Z_std, maximum_frequency=fmax,
                                         method=method, method_kwds=kwds)

    assert_allclose(fap, fap_std, rtol=0.1)
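For reference, the relations that convert_normalization relies on can be sketched as below. These are the textbook conversions between the four Lomb-Scargle normalizations (see e.g. VanderPlas 2018); the actual astropy helper may differ in signature and edge-case handling.

import numpy as np

def to_standard_sketch(Z, chi2_ref, from_normalization):
    # Rough conversion of periodogram power to the 'standard' normalization.
    # chi2_ref is the chi-squared of the constant reference model, as computed
    # by compute_chi2_ref(y, dy) in the test above.
    if from_normalization == 'standard':
        return Z
    elif from_normalization == 'model':
        return Z / (1 + Z)
    elif from_normalization == 'log':
        return 1 - np.exp(-Z)
    elif from_normalization == 'psd':
        return 2 * Z / chi2_ref
    raise ValueError(f"unknown normalization: {from_normalization}")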
Example #3
def test_false_alarm_equivalence(method, normalization, use_errs, units):
    # Note: the PSD normalization is not equivalent to the others, in that it
    # depends on the absolute errors rather than relative errors. Because the
    # scaling contributes to the distribution, it cannot be converted directly
    # from any of the three normalized versions.
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)
    if not use_errs:
        dy = None

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)
    fap = ls.false_alarm_probability(Z,
                                     maximum_frequency=fmax,
                                     method=method,
                                     method_kwds=kwds)

    # Compute the equivalent Z values in the standard normalization
    # and check that the FAP is consistent
    Z_std = convert_normalization(Z,
                                  len(t),
                                  from_normalization=normalization,
                                  to_normalization='standard',
                                  chi2_ref=compute_chi2_ref(y, dy))
    ls = LombScargle(t, y, dy, normalization='standard')
    fap_std = ls.false_alarm_probability(Z_std,
                                         maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=kwds)

    assert_allclose(fap, fap_std, rtol=0.1)
Example #4
def test_autopower(data):
    t, y, dy = data
    ls = LombScargle(t, y, dy)
    kwargs = dict(samples_per_peak=6, nyquist_factor=2,
                  minimum_frequency=2, maximum_frequency=None)
    freq1 = ls.autofrequency(**kwargs)
    power1 = ls.power(freq1)
    freq2, power2 = ls.autopower(**kwargs)

    assert_allclose(freq1, freq2)
    assert_allclose(power1, power2)
Example #5
def test_false_alarm_smoketest(method, normalization):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy = make_data()
    fmax = 5

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)

    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    assert len(fap) == len(Z)
    if method != 'davies':
        assert np.all(fap <= 1)
        assert np.all(fap[:-1] >= fap[1:])  # monotonically decreasing
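The false alarm probability has an inverse, false_alarm_level, which returns the power that pure noise would exceed with a given probability. A self-contained usage sketch with synthetic data (the names and values here are illustrative only):

import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = np.sort(100 * rng.random(200))
y = np.sin(2 * np.pi * t) + 0.5 * rng.standard_normal(200)
dy = np.full_like(y, 0.5)

ls = LombScargle(t, y, dy, normalization='standard')
freq, power = ls.autopower(maximum_frequency=5)

# Power threshold that noise alone would exceed with 1% probability
# (the 'baluev' method requires SciPy, as in the smoke test above).
level_1pct = ls.false_alarm_level(0.01, maximum_frequency=5, method='baluev')
significant = power > level_1pct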
Example #6
def test_false_alarm_smoketest(method, normalization, units):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)

    fap = ls.false_alarm_probability(Z,
                                     maximum_frequency=fmax,
                                     method=method,
                                     method_kwds=kwds)

    assert len(fap) == len(Z)
    if method != 'davies':
        assert np.all(fap <= 1)
        assert np.all(fap[:-1] >= fap[1:])  # monotonically decreasing
Example #7
def test_distribution(null_data, normalization, with_errors, fmax=40):
    t, y, dy = null_data
    if not with_errors:
        dy = None

    N = len(t)
    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    z = np.linspace(0, power.max(), 1000)

    # Test that pdf and cdf are consistent
    dz = z[1] - z[0]
    z_mid = z[:-1] + 0.5 * dz
    pdf = ls.distribution(z_mid)
    cdf = ls.distribution(z, cumulative=True)
    assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)

    # psd normalization without specified errors produces bad results
    if not (normalization == 'psd' and not with_errors):
        # Test that observed power is distributed according to the theoretical pdf
        hist, bins = np.histogram(power, 30, density=True)
        midpoints = 0.5 * (bins[1:] + bins[:-1])
        pdf = ls.distribution(midpoints)
        assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])
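The theoretical single-frequency null distributions that ls.distribution is compared against are the standard ones from the Lomb-Scargle literature (e.g. Baluev 2008, VanderPlas 2018). A rough sketch of the cumulative forms for N data points; astropy's implementation handles the details (including the psd case without specified errors) and may use slightly different conventions:

import numpy as np

def cdf_sketch(z, N, normalization):
    # Approximate null CDF of the single-frequency periodogram power.
    Nk = N - 3  # three fitted parameters (mean, sin, cos)
    if normalization == 'psd':
        return 1 - np.exp(-z)
    elif normalization == 'standard':
        return 1 - (1 - z) ** (0.5 * Nk)
    elif normalization == 'model':
        return 1 - (1 + z) ** (-0.5 * Nk)
    elif normalization == 'log':
        return 1 - np.exp(-0.5 * Nk * z)
    raise ValueError(f"unknown normalization: {normalization}")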
Example #8
def test_absolute_times(data, timedelta):

    # Make sure that we handle absolute times correctly. We also check that
    # TimeDelta works properly when timedelta is True.

    # The example data uses relative times
    t, y, dy = data

    # FIXME: There seems to be a numerical stability issue in that if we run
    # the algorithm with the same values but offset in time, the transit_time
    # is not offset by a fixed amount. To avoid this issue in this test, we
    # make sure the first time is also the smallest so that internally the
    # values of the relative time should be the same.
    t[0] = 0.

    # Add units
    t = t * u.day
    y = y * u.mag
    dy = dy * u.mag

    # We now construct a set of absolute times while keeping the rest the same
    start = Time('2019-05-04T12:34:56')
    trel = TimeDelta(t) if timedelta else t
    t = trel + start

    # and we set up two instances of LombScargle, one with absolute and one
    # with relative times.
    ls1 = LombScargle(t, y, dy)
    ls2 = LombScargle(trel, y, dy)

    kwargs = dict(samples_per_peak=6,
                  nyquist_factor=2,
                  minimum_frequency=2 / u.day,
                  maximum_frequency=None)

    freq1 = ls1.autofrequency(**kwargs)
    freq2 = ls2.autofrequency(**kwargs)
    assert_quantity_allclose(freq1, freq2)

    power1 = ls1.power(freq1)
    power2 = ls2.power(freq2)
    assert_quantity_allclose(power1, power2)

    freq1, power1 = ls1.autopower(**kwargs)
    freq2, power2 = ls2.autopower(**kwargs)
    assert_quantity_allclose(freq1, freq2)
    assert_quantity_allclose(power1, power2)

    model1 = ls1.model(t, 2 / u.day)
    model2 = ls2.model(trel, 2 / u.day)
    assert_quantity_allclose(model1, model2)

    # Check model validation

    with pytest.raises(TypeError) as exc:
        ls1.model(trel, 2 / u.day)
    assert exc.value.args[0] == ('t was provided as a relative time but the '
                                 'LombScargle class was initialized with '
                                 'absolute times.')

    with pytest.raises(TypeError) as exc:
        ls2.model(t, 2 / u.day)
    assert exc.value.args[0] == ('t was provided as an absolute time but the '
                                 'LombScargle class was initialized with '
                                 'relative times.')

    # Check design matrix

    design1 = ls1.design_matrix(2 / u.day, t=t)
    design2 = ls2.design_matrix(2 / u.day, t=trel)
    assert_quantity_allclose(design1, design2)

    # Check design matrix validation

    with pytest.raises(TypeError) as exc:
        ls1.design_matrix(2 / u.day, t=trel)
    assert exc.value.args[0] == ('t was provided as a relative time but the '
                                 'LombScargle class was initialized with '
                                 'absolute times.')

    with pytest.raises(TypeError) as exc:
        ls2.design_matrix(2 / u.day, t=t)
    assert exc.value.args[0] == ('t was provided as an absolute time but the '
                                 'LombScargle class was initialized with '
                                 'relative times.')
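Outside the test harness, the same absolute-time support can be used directly. A minimal self-contained sketch (the data here are made up for illustration):

import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.timeseries import LombScargle

rng = np.random.default_rng(42)
t_rel = np.sort(10 * rng.random(100)) * u.day
t_abs = Time('2019-05-04T12:34:56') + t_rel
y = np.sin(2 * np.pi * t_rel.value) * u.mag

ls = LombScargle(t_abs, y)
freq, power = ls.autopower(minimum_frequency=0.5 / u.day,
                           maximum_frequency=5 / u.day)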