Example #1
def test_ptp():

	time = np.linspace(0, 27, 1000)
	flux = np.zeros(len(time))

	p = ptp(LightCurve(time=time, flux=flux))
	print(p)
	np.testing.assert_allclose(p, 0)

	p = ptp(LightCurve(time=time, flux=flux*np.nan))
	print(p)
	assert np.isnan(p), "Should return nan on pure nan input"

	p = ptp(LightCurve(time=[], flux=[]))
	print(p)
	assert np.isnan(p), "Should return nan on empty input"

	# Pure nan in the time-column should raise ValueError:
	with pytest.raises(ValueError):
		with warnings.catch_warnings():
			warnings.filterwarnings('ignore', category=LightkurveWarning, message='LightCurve object contains NaN times')
			p = ptp(LightCurve(time=time*np.nan, flux=flux))

	# Test with constant lightcurve should return zero:
	flux = np.full(100, np.pi)
	time = np.linspace(0, 27, len(flux))
	p = ptp(LightCurve(time=time, flux=flux))
	print(p)
	np.testing.assert_allclose(p, 0)
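For reference, the `ptp` function under test is not shown in this listing. The sketch below is illustrative only: it mirrors the point-to-point diagnostic computed in Example #27 (median absolute flux difference between consecutive cadences) and matches the edge cases asserted above, but it is not necessarily the tested implementation.

import numpy as np

def ptp_sketch(lc):
    # Median point-to-point scatter of the flux; NaN for empty or all-NaN input.
    flux = np.asarray(lc.flux, dtype=float)
    flux = flux[np.isfinite(flux)]
    if flux.size < 2:
        return np.nan
    return np.nanmedian(np.abs(np.diff(flux)))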
Example #2
def test_basic_ascii_io():
    """Verify we do not break the basic ascii i/o functionality provided by AstroPy Table."""
    # Part I: Can we read a LightCurve from a CSV file?
    csvfile = tempfile.NamedTemporaryFile(
        delete=False)  # using delete=False to make tests pass on Windows
    try:
        csvfile.write(
            b"time,flux,flux_err,color\n1,2,3,red\n4,5,6,green\n7,8,9,blue")
        csvfile.flush()
        lc_csv = LightCurve.read(csvfile.name, format="ascii.csv")
        assert lc_csv.time[0].value == 1
        assert lc_csv.flux[1] == 5
        assert lc_csv.color[2] == "blue"
    finally:
        csvfile.close()
        os.remove(csvfile.name)

    # Part II: can we write the light curve to a tab-separated ascii file, and read it back in?
    tabfile = tempfile.NamedTemporaryFile(delete=False)
    try:
        lc_csv.write(tabfile.name, format="ascii.tab", overwrite=True)
        lc_rst = LightCurve.read(tabfile.name, format="ascii.tab")
        assert lc_rst.color[2] == "blue"
        assert (lc_csv == lc_rst).all()
    finally:
        tabfile.close()
        os.remove(tabfile.name)
Example #3
def test_designmatrix_prior_type():
    """Regression test for #982: prior_mu and prior_sigma should not be Quantity objects."""
    size = 10
    lc = LightCurve(flux=np.random.normal(loc=1.0, scale=0.1, size=size))
    corr = lc.to_corrector("sff")
    corr.correct(centroid_col=np.random.normal(loc=1.0, scale=0.1, size=size),
                 centroid_row=np.random.normal(loc=1.0, scale=0.1, size=size),
                 windows=1)
    assert "Quantity" not in str(type(corr.design_matrix_collection.prior_mu))
    assert "Quantity" not in str(
        type(corr.design_matrix_collection.prior_sigma))
Example #4
def test_zero_fluxerr():
    """Regression test for #668.

    Flux uncertainties smaller than or equal to zero (`lc.flux_err <= 0`) will
    trigger an invalid or non-finite matrix.  We expect `RegressionCorrector`
    to detect this and yield a graceful `ValueError`."""
    lc = LightCurve(flux=[5, 10], flux_err=[1, 0])
    with pytest.raises(ValueError):
        RegressionCorrector(lc)
    lc = LightCurve(flux=[5, 10], flux_err=[1, -10])
    with pytest.raises(ValueError):
        RegressionCorrector(lc)
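RegressionCorrector's actual validation code is not part of this listing; as an assumption about the behaviour being tested, a guard along the following lines would satisfy this test (the NaN-related cases exercised in Example #9 would need separate checks).

import numpy as np

def validate_flux_err_sketch(lc):
    # Reject any finite flux uncertainty that is zero or negative.
    flux_err = np.asarray(lc.flux_err, dtype=float)
    if np.any(np.isfinite(flux_err) & (flux_err <= 0)):
        raise ValueError("flux_err must be strictly positive where finite")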
Example #5
def m1(a, b):
    GlobalVars.register('loop', asyncio.get_event_loop())

    mask = np.logical_and(a >= -0.5, a <= 0.5)
    mask[0] = False
    mask[-1] = False
    fu = sigma_clip(a[mask], b[mask])
    other_a = a[~mask]
    other_b = b[~mask]

    func = interp1d(other_a, other_b, kind='linear', fill_value='extrapolate')
    interp_b = func(a[mask])

    flat_b = b.copy()
    flat_b[mask] = interp_b

    lc, trend_lc = LightCurve(a, flat_b).flatten(return_trend=True,
                                                 window_length=7,
                                                 polyorder=3,
                                                 niters=10)
    prev = b[mask].copy()
    a, b[mask] = GlobalVars.get_var('loop').run_until_complete(fu)

    a, flat_b = lc.time, lc.flux
    flat_b[mask] = b[mask]
    print(np.all(prev == b[mask]))
    return a, flat_b, trend_lc
Example #6
def flatten_interp_transits(all_time, all_flux, period, t0, duration):
    """
    all_time, all_flux: time and flux of the light curve, before folding.
    duration: in days.
    Returns the flattened time and flux (not folded).
    """
    fold_time = all_time % period
    t0 %= period
    half_duration = duration / 2.0
    mask = np.logical_and(fold_time <= t0 + half_duration,
                          fold_time >= t0 - half_duration)

    tce_time, tce = all_time[mask], all_flux[mask]

    lc = LightCurve(time=all_time, flux=all_flux).flatten(window_length=501,
                                                          polyorder=2,
                                                          break_tolerance=40,
                                                          sigma=3)

    all_time, flat_flux = lc.time, lc.flux

    # keep the transit original
    flat_flux[mask] = tce

    # sigma clip the outliers in the transit
    # tce_time, tce = sigma_clip(tce_time, tce, sigma=3.0)

    try:
        return all_time.value, flat_flux.value
    except:
        return all_time, flat_flux
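A hypothetical call to the helper above on synthetic arrays (all values are arbitrary: a 0.2-day transit on a 3-day period, with the 501-point flattening window well below the 1500 available cadences):

import numpy as np

all_time = np.arange(0.0, 30.0, 0.02)                  # 30 days at ~29-minute cadence
all_flux = 1.0 + 1e-4 * np.random.randn(all_time.size)
flat_time, flat_flux = flatten_interp_transits(all_time, all_flux,
                                               period=3.0, t0=1.0, duration=0.2)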
Example #7
def test_sweet_vetter():
    """Tests the interface of the Sweet vetter class, without worrying about the
    correctness of the implementation
    """
    period = 8. * u.day
    epoch_bkjd = 133. * u.day
    duration = 4.0 * u.hour
    target_name = "Dummy Data"
    event_name = "Data used to test the interface"

    tce = Tce(period=period,
              epoch=epoch_bkjd,
              epoch_offset=exo_const.bkjd,
              duration=duration,
              depth=0 * exo_const.frac_amp,
              target_name=target_name,
              event_name=event_name)

    # from lightkurve.lightcurve import LightCurve
    rng = np.random.default_rng(seed=1234)
    time = np.arange(1000)
    flux = 10 + rng.standard_normal(1000)
    lc = LightCurve(time, flux, time_format='bkjd')

    sweet_vetter = Sweet()
    res = sweet_vetter.run(tce, lc)
    sweet_vetter.plot()

    assert isinstance(res, dict)
    assert 'msg' in res.keys()
    assert 'amp' in res.keys()
    amp = res['amp']
    assert amp.ndim == 2
    assert amp.shape == (3, 3)
Example #8
def test_align_to_lc():
    """ Test to ensure we can properly align different light curves
    """

    time = np.arange(1, 100, 0.1)
    lc1 = LightCurve(time=time, flux=1, flux_err=0.0)
    lc1['cadenceno'] = np.arange(1, len(time) + 1)
    lc2 = LightCurve(time=time, flux=2, flux_err=0.0)
    lc2['cadenceno'] = np.arange(1, len(time) + 1)

    # Remove different cadences from both light curves and align the second to the first
    lc1 = lc1[0:10].append(lc1[20:100])
    lc2 = lc2[0:50].append(lc2[70:100])

    aligned_lc2 = _align_to_lc(lc2, lc1)

    assert np.all(lc1['cadenceno'] == aligned_lc2['cadenceno'])
Example #9
def test_nan_input():
    # The following light curves should all raise ValueErrors because of NaNs
    with warnings.catch_warnings():
        # Instantiating light curves with NaN times will yield a warning
        warnings.simplefilter("ignore", LightkurveWarning)
        lcs = [
            LightCurve(flux=[5, 10], flux_err=[np.nan, 1]),
            LightCurve(flux=[np.nan, 10], flux_err=[1, 1]),
        ]

    # Passing these to RegressionCorrector should raise a ValueError
    for lc in lcs:
        with pytest.raises(ValueError):
            RegressionCorrector(lc)

    # However, we should be flexible with letting `flux_err` be all-NaNs,
    # because it is common for errors to be missing.
    lc = LightCurve(flux=[5, 10], flux_err=[np.nan, np.nan])
    RegressionCorrector(lc)
Example #10
def test_against_fleck(test_input, expected):
    from fleck import Star
    import astropy.units as u

    # Unpack the input
    lons, lats, rads, inc = test_input

    # Create a time axis and compute the light curve on that axis
    times = np.linspace(-4 * np.pi, 4 * np.pi, 100)
    s = Star(spot_contrast=0.3,
             u_ld=[0, 0],
             n_phases=len(times),
             rotation_period=2 * np.pi)
    # Note: the longitudes in fleck and dot are different by a constant
    # offset of -pi/2
    lc = s.light_curve((lons - np.pi / 2).T[:, None] * u.rad,
                       lats.T[:, None] * u.rad,
                       rads.T[:, None],
                       inc * u.deg,
                       times=times,
                       time_ref=0)[:, 0]

    # Define (arbitrary) error for the light curve
    errs = np.std(lc) * np.ones_like(lc) / 10

    m = Model(light_curve=LightCurve(times, lc - np.median(lc), errs),
              rotation_period=2 * np.pi,
              n_spots=2,
              partition_lon=False,
              contrast=0.3)

    # Create a starting point for the dot model in the correctly transformed
    # notation from fleck to dot
    start = {
        "dot_R_spot": np.array([rads]),
        "dot_lat": np.array([np.pi / 2 - lats]),
        "dot_lon": np.array([lons]),
        "dot_comp_inc": np.radians(90 - inc),
        "dot_ln_shear": np.log(1e-2),
        "dot_P_eq": 2 * np.pi,
        "dot_f0": m.lc.flux.max()
    }

    # Need to call this to validate ``start``
    pm.util.update_start_vals(start, m.pymc_model.test_point, m.pymc_model)

    # the fit is not normalized to its median like the input light curve is
    fit = m(start)
    # ...so we normalize it before we compare:
    fit -= np.median(fit)

    # Compute the chi^2. This should be super small if both models agree!
    chi2 = np.sum((m.lc.flux - fit)**2)
    assert np.log10(chi2) < expected
Example #11
def test_overfit_metric_lombscargle():
    """Sanity checks for `overfit_metric_lombscargle`"""
    # Create artificial flat and sinusoid light curves
    time = np.arange(1, 100, 0.1)
    lc_flat = LightCurve(time=time, flux=1, flux_err=0.0)
    lc_sine = LightCurve(time=time, flux=np.sin(time) + 1, flux_err=0.0)

    # If the light curve didn't change, it should be "perfect", i.e. metric == 1
    assert overfit_metric_lombscargle(lc_flat, lc_flat) == 1.0
    assert overfit_metric_lombscargle(lc_sine, lc_sine) == 1.0

    # If the light curve went from a sine to a flat line,
    # no noise was introduced, hence metric == 1 (good)
    assert overfit_metric_lombscargle(lc_sine, lc_flat) == 1.0

    # If the light curve went from flat to sine, metric == 0 (bad)
    assert overfit_metric_lombscargle(lc_flat, lc_sine) == 0.0
    # However, if the light curves were noisy to begin with, it shouldn't be considered that bad
    lc_flat.flux_err += 0.5
    lc_sine.flux_err += 0.5
    assert overfit_metric_lombscargle(lc_flat, lc_sine) > 0.5
Example #12
def test_sinusoid_noise():
    """Can we remove simple sinusoid noise added to a flat light curve?"""
    size = 100
    time = np.linspace(1, 100, size)
    true_flux = np.ones(size)
    noise = np.sin(time / 5)
    # True light curve is flat, i.e. flux=1 at all time steps
    true_lc = LightCurve(time=time,
                         flux=true_flux,
                         flux_err=0.1 * np.ones(size))
    # Noisy light curve has a sinusoid signal added
    noisy_lc = LightCurve(time=time,
                          flux=true_flux + noise,
                          flux_err=true_lc.flux_err)
    design_matrix = DesignMatrix({
        "noise": noise,
        "offset": np.ones(len(time))
    },
                                 name="noise_model")

    for dm in [design_matrix, design_matrix.to_sparse()]:
        # Can we recover the true light curve?
        rc = RegressionCorrector(noisy_lc)
        corrected_lc = rc.correct(dm)
        assert_almost_equal(corrected_lc.normalize().flux, true_lc.flux)

        # Can we produce the diagnostic plot?
        rc.diagnose()

        # Does it work when we set priors?
        dm.prior_mu = [0.1, 0.1]
        dm.prior_sigma = [1e6, 1e6]
        corrected_lc = RegressionCorrector(noisy_lc).correct(dm)
        assert_almost_equal(corrected_lc.normalize().flux, true_lc.flux)

        # Does it work when `flux_err` isn't available?
        noisy_lc = LightCurve(time=time, flux=true_flux + noise)
        corrected_lc = RegressionCorrector(noisy_lc).correct(dm)
        assert_almost_equal(corrected_lc.normalize().flux, true_lc.flux)
Example #13
def load_file(file: str,
              clip: float = 4,
              it: int = 1,
              apply_file_correction: bool = False) -> LightCurve:
    """
    Loads the light curve data of a target and optionally normalizes and corrects it.

    :param file: Name of the target file, including its path
    :param clip: Sigma-clipping threshold passed to remove_outliers
    :param it: Maximum number of sigma-clipping iterations (maxiters)
    :param apply_file_correction: if True, offset the flux, sigma-clip outliers and convert the flux to magnitudes
    :return: LightCurve object
    """
    if not os.path.exists(file):
        raise IOError(ctext(f"File {file} doesn't exist!", error))

    mprint(f"Reading data from {file} ...", log)
    try:
        data = np.loadtxt(file)
    except ValueError:
        data = read_csv(file)
        data = np.array((data.time, data.flux))
    if data.shape[0] > data.shape[1]:
        data = data.T

    if data.shape[0] == 2:
        lc = LightCurve(time=data[0], flux=data[1])
    else:
        lc = LightCurve(time=data[0], flux=data[1], flux_err=data[2])

    lc = lc.remove_nans()
    if apply_file_correction:
        lc.flux = lc.flux + float(np.amin(lc.flux)) + 10
        lc = lc.remove_outliers(clip, maxiters=it)
        lc = mag(lc)
        lc = lc.remove_nans()
    else:
        if np.amax(np.abs(lc.flux)) > 10:
            mprint(
                f"It seems as if your flux isn't in magnitudes. Be aware, that SMURFS expects the flux in magnitudes. "
                f"Continuing ...", warn)
        if np.abs(np.median(lc.flux)) > 1:
            mprint(
                f"The median of your flux is {'%.2f' % np.median(lc.flux)}. To do a proper analysis, the median should "
                f"be close to 0. Be aware, that this might cause issues. Continuing...",
                warn)
    mprint(
        f"Total observation length: {'%.2f' % (lc.time[-1] - lc.time[0])} days.",
        log)
    mprint("Extracted data from target!", info)
    return lc
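A typical call to the loader above; the file path is a placeholder.

lc = load_file("data/my_target.txt", clip=4, it=1, apply_file_correction=False)
lc.plot()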
Example #14
def test_regressioncorrector_priors():
    """This test will fit a design matrix containing the column vectors
    a=[1, 1] and b=[1, 2] to a light curve with flux=[5, 10].

    The best coefficients for this problem are [0, 5] because 0*a + 5*b == flux,
    however we will verify that changing the priors will yield different
    solutions.
    """
    lc1 = LightCurve(flux=[5, 10])
    lc2 = LightCurve(flux=[5, 10], flux_err=[1, 1])
    design_matrix = DesignMatrix(pd.DataFrame({"a": [1, 1], "b": [1, 2]}))
    for dm in [design_matrix, design_matrix.to_sparse()]:
        for lc in [lc1, lc2]:
            rc = RegressionCorrector(lc)

            # No prior
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [0, 5])

            # Strict prior centered on correct solution
            dm.prior_mu = [0, 5]
            dm.prior_sigma = [1e-6, 1e-6]
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [0, 5])

            # Strict prior centered on incorrect solution
            dm.prior_mu = [99, 99]
            dm.prior_sigma = [1e-6, 1e-6]
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [99, 99])

            # Wide prior centered on incorrect solution
            dm.prior_mu = [9, 9]
            dm.prior_sigma = [1e6, 1e6]
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [0, 5])
Example #15
def __flatten_interp_transits(all_time, all_flux, period, t0, duration):
    """
    all_time, all_flux: time and flux of the light curve, before folding.
    duration: in days.
    Returns the flattened time and flux (not folded).
    """

    fold_time = all_time % period
    t0 %= period
    half_duration = duration / 2.0
    mask = np.logical_and(fold_time <= t0 + half_duration,
                          fold_time >= t0 - half_duration)

    mask[0] = False
    mask[-1] = False

    other_time = all_time[~mask]
    other_flux = all_flux[~mask]

    if len(other_flux) > 2:
        func = interp1d(other_time, other_flux, fill_value='extrapolate')
        interp_flux = func(all_time[mask])
    else:
        interp_flux = all_flux[mask]

    flat_flux = all_flux.copy()
    flat_flux[mask] = interp_flux

    lc = LightCurve(all_time, flat_flux).flatten(window_length=51,
                                                 polyorder=2,
                                                 break_tolerance=40,
                                                 sigma=5,
                                                 niters=5)

    all_time, flat_flux = lc.time, lc.flux

    flat_flux[mask] = all_flux[mask]

    # keep the transit intact

    tce_time, tce = all_time[mask], all_flux[mask]
    tce_time, tce = sigma_clip(tce_time, tce, sigma=3.0)
    all_flux[mask] = tce
    # sigma clip the outliers in the transit

    return all_time, flat_flux
Example #16
def m2(a, b):
    b_original = b.copy()
    mask = np.logical_and(a >= -0.5, a <= 0.5)
    tce_time = a[mask]
    tce = b[mask]
    func = interp1d(tce_time, tce)
    interp_flux = func(tce_time)
    b[mask] = interp_flux

    logging.info(np.all(b == b_original))
    lc, trend_lc = LightCurve(a, b).flatten(break_tolerance=40,
                                            window_length=401,
                                            return_trend=True)

    flat_b = lc.flux
    flat_b[mask] = tce
    return a, flat_b, trend_lc
Example #17
def simulate_transit_data(N=1e6,
                          cadence=2.0 / 60.0 / 24.0,
                          duration=3.0,
                          tmin=0.0,
                          **kw):
    '''
    This function will generate a simulated LightCurve dataset
    with photon noise set by the expected photon count (N) and time spacing (cadence),
    with a transit injected into it (whose parameters are set by any
    extra keyword arguments that you feed in, like `period`, `radius`, ...)

    Parameters
    ----------

    N : float
        The average number of photons expected per exposure, to
        set the standard deviation of the noise.

    cadence : float
        The integration time of the measurements, in days.

    duration : float
        The total length of time covered by the light curve, in days.

    tmin : float
        Time offset added to the simulated time values, setting the start time
        of the light curve (in days).

    **kw : dict
        Any additional keywords will be passed onward to the
        batman model to set the parameters of the transit model.
        Valid additional keywords are period, t0, radius, a, b, ld.

    Returns
    -------
    lc : LightCurve
        A simulated lightkurve LightCurve, with a transit injected,
        and the specified noise.
    '''
    noise = create_photon_lightcurve(N=N, cadence=cadence,
                                     duration=duration).normalize()
    noise.time += tmin
    flux = BATMAN(noise.time, **kw)
    return LightCurve(time=noise.time,
                      flux=flux * noise.flux,
                      flux_err=noise.flux_err,
                      time_format='jd')
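An example call, using transit keywords that the docstring says are forwarded to the batman model (`period`, `t0`, `radius`); the values are arbitrary.

lc = simulate_transit_data(N=1e6, cadence=2.0 / 60.0 / 24.0, duration=3.0,
                           period=1.3, t0=0.4, radius=0.1)
lc.scatter()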
Example #18
def ab_dor_example_lc(path=None):
    """
    Return a `~lightkurve.lightcurve.LightCurve` object with the first few TESS
    observations of the rapidly-rotating, spotted star AB Doradus.

    Parameters
    ----------
    path : None or str
        Path to the file to load (optional)

    Returns
    -------
    lc : `~lightkurve.lightcurve.LightCurve`
        Light curve of AB Doradus
    """
    if path is None:
        path = os.path.join(os.path.dirname(__file__), 'data',
                            'abdor_lc_example.npy')
    return LightCurve(*np.load(path))
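Loading and plotting the bundled example light curve (assumes the packaged abdor_lc_example.npy file is installed alongside the module):

lc = ab_dor_example_lc()
lc.plot()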
Example #19
    def make_lightcurves(self):

        # create an empty list of photometry tables and times
        tables = []
        times = []

        # loop over all the images
        for i in tqdm(range(len(self.images))):
            table = self.photometry(image_number=i, quick=True)
            table.add_index('#')
            tables.append(table)
            times.append(self.images.time[i].jd)

        lightcurves = {}
        for k in tables[0]['#']:
            # use the collected observation times as the light curve's time axis
            lightcurves[k] = LightCurve(time=times,
                                        flux=[t.loc[k]['flux'] for t in tables])

        self.lightcurves = lightcurves
        return lightcurves
Example #20
def test_sff_breakindex():
    """Regression test for #616."""
    lc = LightCurve(flux=np.ones(20))
    with warnings.catch_warnings():
        # Ignore "LightkurveWarning: The design matrix has low rank".
        warnings.simplefilter("ignore", LightkurveWarning)
        corr = SFFCorrector(lc)
        corr.correct(
            breakindex=[5, 10],
            centroid_col=np.random.randn(20),
            centroid_row=np.random.randn(20),
        )
        assert 5 in corr.window_points
        assert 10 in corr.window_points
        corr.correct(
            breakindex=[5, 10],
            centroid_col=np.random.randn(20),
            centroid_row=np.random.randn(20),
            windows=1,
        )
        assert_array_equal(corr.window_points, np.asarray([5, 10]))
Example #21
def test_rms_timescale():

	time = np.linspace(0, 27, 100)
	flux = np.zeros(len(time))

	rms = rms_timescale(LightCurve(time=time, flux=flux))
	print(rms)
	np.testing.assert_allclose(rms, 0)

	rms = rms_timescale(LightCurve(time=time, flux=flux*np.nan))
	print(rms)
	assert np.isnan(rms), "Should return nan on pure nan input"

	rms = rms_timescale(LightCurve(time=[], flux=[]))
	print(rms)
	assert np.isnan(rms), "Should return nan on empty input"

	# Pure nan in the time-column should raise ValueError:
	with pytest.raises(ValueError):
		with warnings.catch_warnings():
			warnings.filterwarnings('ignore', category=LightkurveWarning, message='LightCurve object contains NaN times')
			rms = rms_timescale(LightCurve(time=time*np.nan, flux=flux))

	# Time with invalid contents (e.g. Inf) should throw a ValueError:
	time_invalid = time.copy()
	time_invalid[2] = np.inf
	with pytest.raises(ValueError):
		rms = rms_timescale(LightCurve(time=time_invalid, flux=flux))

	time_someinvalid = time.copy()
	time_someinvalid[2] = np.nan
	with warnings.catch_warnings():
		warnings.filterwarnings('ignore', category=LightkurveWarning, message='LightCurve object contains NaN times')
		rms = rms_timescale(LightCurve(time=time_someinvalid, flux=flux))
	print(rms)
	np.testing.assert_allclose(rms, 0)

	# Test with timescale longer than timespan should return zero:
	flux = np.random.randn(1000)
	time = np.linspace(0, 27, len(flux))
	rms = rms_timescale(LightCurve(time=time, flux=flux), timescale=30.0)
	print(rms)
	np.testing.assert_allclose(rms, 0)
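As with `ptp` above, the `rms_timescale` implementation is not included in this listing. The sketch below is one plausible reading that reproduces the behaviour asserted in this test; it operates on plain arrays rather than a LightCurve to stay version-agnostic, and uses the one-hour default suggested by the `timescale=3600/86400` call in Example #27. Illustrative only, not the tested code.

import numpy as np

def rms_timescale_sketch(time, flux, timescale=3600 / 86400):
    time = np.asarray(time, dtype=float)
    flux = np.asarray(flux, dtype=float)
    if flux.size == 0 or np.all(np.isnan(flux)):
        return np.nan
    if np.any(np.isinf(time)) or np.all(np.isnan(time)):
        raise ValueError("Invalid time values")
    span = np.nanmax(time) - np.nanmin(time)
    if timescale > span:
        return 0.0
    # Bin the flux on the requested timescale and return the scatter of the bin means.
    edges = np.arange(np.nanmin(time), np.nanmax(time) + timescale, timescale)
    idx = np.digitize(time, edges)
    means = np.array([np.nanmean(flux[idx == k]) for k in np.unique(idx)])
    return np.nanstd(means, ddof=1)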
Example #22
def load_from_hdf5(kic, data_path=None, index_file=None):
    """
    Load a light curve from the HDF5 archive on Google Cloud Platform.
    """
    if data_path is None:
        data_path = hdf5_archive_disk
    if index_file is None:
        index_file = hdf5_index_path
    index_path = os.path.join(data_path, index_file)
    stars_index = pd.read_csv(index_path)
    star_path_list = stars_index.loc[stars_index["KIC"] ==
                                     kic]["filepath"].values
    if len(star_path_list) == 0:
        raise ValueError(f'Target KIC {kic} not in database.')
    star_path = star_path_list[0]

    with h5py.File(os.path.join(data_path, star_path), "r") as f:
        time = np.array(f[str(kic)].get("PDC_SAP_time"))
        flux = np.array(f[str(kic)].get("PDC_SAP_flux"))
        flux_err = np.array(f[str(kic)].get("PDC_SAP_flux_err"))

    pdcsap = LightCurve(time=time, flux=flux, flux_err=flux_err, targetid=kic)

    return pdcsap
Example #23
def bls_vetsheet(lc, results=0, show=True, save=False, savename='vetsheet.png'):
    """
    Function to plot a vetting sheet after running the 
    transit_tools.signal_search function. Output is formatted to fit onto a
    standard 8"x11" sheet of paper.

    Parameters
    ----------
    lc : `transit_tools.lightcurve` object
        Input transit_tools `transit_tools.lightcurve` object that has had a 
        BLS signal search performed on it.
    results : int
        Index of results attribute array of provided 'lightcurve' object. 
        Indicates which set of results to plot if signal_search produced more
        than one set of output results. Can be set to -1 to display the most
        recent signal run that did not meet the significance threshold.
    show : bool or str
        Flag to determine whether plots will be displayed or not. Must be set to
        False or 'both' for output matplotlib object to be expected.
    save : bool
        Flag to determine whether the plots will be saved as a PNG.
    savename : str or None
        File name for plots to be saved as if save is set to True.

    Returns
    -------
    plots : matplotlib object
       Output matplotlib plot object. Optional if show is set to False or 
       'both'.
    """
    if not hasattr(lc, 'results') or len(lc.results) == 0:
        raise ValueError('lightcurve object has no results.')
    elif len(lc.results) > 0:
        res = lc.results[results]
        bls = lc.blsobjs[results] #rerun on cleanlc[results]?
        model = bls.get_transit_model(period=res['period'],
                                      transit_time=res['t0'],
                                      duration=res['duration'])
        
    if results == 0 or len(lc.results) == 0:
        time = lc.time
        flux = lc.flux
        flux_err = lc.flux_err
    else:
        time = lc.cleanlc[results].time
        flux = lc.cleanlc[results].flux
        flux_err = lc.cleanlc[results].flux_err

    lc_tmp = LightCurve(time, flux, flux_err)
    
    #setting up figure
    fig = plt.figure(figsize=(8, 9.2))
    gs = fig.add_gridspec(5, 2)

    #phase-folded light curve with transit model
    ax = fig.add_subplot(gs[0, 1])

    fold = lc_tmp.fold(res['period'], t0=res['t0'])
    binfold = fold.bin(20)
    
    ax.scatter(fold.time, fold.flux, color='blue', s=0.2,
               alpha=0.5, zorder=2)
    model.fold(res['period'], t0=res['t0']).plot(ax=ax, color='r', lw=2,
                                                 alpha=0.7)
    ax.scatter(binfold.time, binfold.flux, c='k', s=2.)
    
    ax.set_xlim(-0.05, 0.05)
    ax.set_xlabel('Phase')
    ax.set_ylabel('Relative Flux')

    plt.tight_layout()
    
    if show:
        plt.show()

    if save:
        plt.savefig(savename)
Example #24
def loadLC(folderName, downloadDir, errorIfNot2Min=True, dumpHeader=False, delimiter="|", fluxType="PDCSAP", normalised=True):
    """
    Loads single or multiple TESS light curves and stores them in a lightkurve LightCurve object.
    Multiple LCs are detected when the folderName string contains a delimiter.
    
    :param folderName: name of the data folder
    :param downloadDir: name of the root data folder
    :param errorIfNot2Min: if `True` (default), raise an error when the cadence is not 2 min; otherwise only warn
    :param dumpHeader: if `True` prints the header of the data
    :param delimiter: delimiter chosen to separate the data paths, default: |
    :param fluxType: SAP or PDCSAP
    :param normalised: if `True` returns the median-normalised flux
    :return: the concatenated LightCurve (sorted in time) and a comma-separated string of the observed sectors
    """

    lc = None
    if "|" in folderName:
        folderNames = folderName.split(delimiter)
    else:
        folderNames = [folderName]
        
    for folderName in folderNames:
        imgFname = "{}_lc.fits".format(folderName)
        imgFname = os.path.join(downloadDir, folderName, imgFname)
        head = fits.getheader(imgFname)
        sector = head["sector"]
        if dumpHeader:
            print(repr(head))
        lightCurveData = Table.read(imgFname)
        cadence = np.nanmedian((lightCurveData["TIME"][1:] - lightCurveData["TIME"][:-1])*24*60)
        if np.abs(cadence-2.) < 0.5:
            logger.info("Cadence is 2 min for {}".format(imgFname))
        else:
            if errorIfNot2Min:
                raise RuntimeError("Cadence is {:1.1f} min for {}".format(cadence, imgFname))
            else:
                logger.warning("Cadence is {:1.1f} min for {}".format(cadence, imgFname))
        
        lightCurveData["TIME"].unit = u.day
        time = lightCurveData["TIME"]
        flux = lightCurveData["{}_FLUX".format(fluxType)] 
        fluxErr = lightCurveData["{}_FLUX_ERR".format(fluxType)]

        meta = {
            "TIME": lightCurveData["TIME"],
            "MOM_CENTR1": lightCurveData["MOM_CENTR1"],
            "MOM_CENTR2": lightCurveData["MOM_CENTR2"],
            "MOM_CENTR1_ERR": lightCurveData["MOM_CENTR1_ERR"],
            "MOM_CENTR2_ERR": lightCurveData["MOM_CENTR2_ERR"],
            "POS_CORR1": lightCurveData["POS_CORR1"],
            "POS_CORR2": lightCurveData["POS_CORR2"],
            }

        lcTemp = LightCurve(time=time, flux=flux, flux_err=fluxErr, meta=meta)
        lcTemp = lcTemp.remove_nans()
        if normalised:
            lcTemp = lcTemp.normalize()
            
        if lc is None:
            lc = lcTemp
            sectors = sector
        else:
            lc = lc.append(lcTemp)
            sectors = "{},{}".format(sectors, sector)
    
    ids = np.argsort(lc.time)
    lc = lc[ids]
    
    return lc, sectors
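A hypothetical call that stitches two sectors together; the folder names and download directory are placeholders, and each folder is expected to contain a <folderName>_lc.fits file as assumed in the code above.

lc, sectors = loadLC("target-s0001|target-s0002", "/path/to/tess_data",
                     fluxType="PDCSAP", normalised=True)
print(sectors)
lc.plot()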
Example #25
def batman_transit(period,
                   rp,
                   a,
                   u=[0.4804, 0.1867],
                   t0=0.,
                   inc=90.,
                   ecc=0.,
                   w=90.,
                   limb_dark='quadratic',
                   cadence=0.01,
                   length=None,
                   time=None,
                   **kwargs):
    """
    Function to generate a simulated 'LightCurve' object using the BATMAN
    package developed by Laura Kreidberg. Please see BATMAN documentation for a
    more in-depth description of each parameter.

    !!Fix length argument. Make it remove parts of light curve from boths sides
      of transit!!
    !!Integrate supersample_factor and exp_time arguments better since they're
      likely going to be common for TESS. Maybe automatically do it for user!!

    Parameters
    ----------
    period : float
       Orbital period in days of the simulated planet.
    rp : float
       Radius of planet in units of stellar radii.
    a : float
       Semi-major axis of planet orbit in units of stellar radii.
    u : array
       Limb darkening coefficients. Defaults to a G2V star in the Kepler 
       bandpass.
    t0 : float
       Mid-transit time of the first transit.
    inc : float
       Orbital inclination in degrees.
    ecc : float
       Eccentricity of planet orbit. May cause simulation to run slowly if set 
       to a nonzero eccentricity and noise is added using a separate function.
    w : float
       Longitude of periastron in degrees.
    limb_dark : str
       Limb darkening model.
    cadence : float
       Cadence of data points in minutes.
    length : float or None
       Length of output light curve in days. If set to None, defaults to one 
       orbital period in length centered on the first transit.
    time : array
       Array of time points during which the light curve will be simulated.
    kwargs
       Additional arguments to be passed to batman.TransitModel. For TESS
       light curves, it is recommended to specify the supersample_factor and
       exp_time arguments for best results.

    Returns
    -------
    lc : 'LightCurve' object
       The light curve of the simulated planet transit for one full orbital 
       period. Light curve will have attribute 'params' that is an object
       containing all of the input simulated parameters.
    """
    if length is not None and time is not None:
        raise ValueError('Please only specify either length or time')

    if t0 is None:
        t0 = 0.

    params = batman.TransitParams()
    params.t0 = t0
    params.per = period
    params.rp = rp
    params.a = a
    params.inc = inc
    params.ecc = ecc
    params.w = w
    params.u = u
    params.limb_dark = str(limb_dark)

    if not length:
        length = period

    if time is None:
        t = np.linspace(-period / 2, length - (period / 2),
                        int((period * 24 * 60) // cadence))
    else:
        t = time

    m = batman.TransitModel(params, t, **kwargs)
    flux = m.light_curve(params)

    lc = LightCurve(t, flux, flux_err=None)

    lc.params = params

    return lc
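A brief example call. Following the docstring's note for TESS, `supersample_factor` and `exp_time` (genuine batman.TransitModel keywords) are passed through **kwargs; the planet parameters are arbitrary.

lc = batman_transit(period=3.5, rp=0.1, a=8.0, cadence=2.0,
                    supersample_factor=29, exp_time=2.0 / 60 / 24)
lc.plot()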
Example #26
def bls_search(*args, minimum_period=1, maximum_period=None,
               frequency_factor=1, per_trials=10000, period=None,
               rms_calc=True, norm_val=1., clean_lc=False, del_dur=1.0,
               blsobj_out=False, **kwargs):
    """
    Function to search a light curve using the Box Least Squares function

    Parameters
    ----------
    *args
        either t, f , f_err or lightkurve obj
    minimum_period : float
        Minimum of period range to search. Default 1.
    maximum_period : float or None
        Maximum of period range to search in same units of the light curve. If None, defaults to half the duration of the light curve.
    per_trials : int or None
        Number of trials to search in the period range. Default is 10000. Might consider deprecating.
    period : `np.array` or None
        User can specify exact periods to sample rather than the min, max, and number of trials.
    frequency_factor : float
        Spacing between frequencies. Used to generate period search grid.
    rms_calc : bool
        If the RMS should be calculated in the event of no error input.
    norm_val : float
        Value that the light curve is normalized to; used when computing the
        RMS to fill in missing errors. Default is 1.
    clean_lc : bool
        Flag to indicate whether or not to output a cleaned lightcurve with 
        the recovered periodic signal masked out. Results in an additional 
        expected output. Default ``False``.
    del_dur : float
        How many durations worth of data points should be excluded from 
        cleaned light curve centered on the transit center. Default is 1. 
        Values < 1 will result in some in-transit points remaining while 
        values > 1 will remove some points outside the transit.
    blsobj_out : bool
        Flag to determine if the bls periodogram object will be included in 
        the outputs. If ``True``, another output will be expected. Default
        ``False``.
    **kwargs
        Additional arguments to be passed to `lightkurve.BoxLeastSquaresPeriodogram.from_lightcurve` and `astropy.timeseries.BoxLeastSquares.power`.

    Returns
    -------
    results : dict
       Results of the BLS fit.
    cleaned_lc : ``lightkurve.LightCurve`` object, optional
       A light curve with the transits masked out based on the results of the 
       BLS search.
    """
    #Parsing light curve input args
    if len(args) == 1:
        lc = args[0]
        try:
            if lc.flux_err is None:
                print('No flux errors provided')
        except:
            print('No flux errors provided')

    elif len(args) > 1:
        time = args[0]
        flux = args[1]
        lc = LightCurve(time, flux, flux_err=None)
        if len(args) == 3:
            if args[2] is not None:
                lc.flux_err = args[2]
            else:
                print('No flux_errors provided')
        else:
            print('No flux errors provided')
            
    if rms_calc and lc.flux_err is None:
        lc.flux_err = np.ones(len(lc.flux)) * rms(lc.flux, norm_val=norm_val)
        print('RMS will be used for errors')

    #parsing period grid controls
    if period is not None:
        bls = lc.to_periodogram(method='bls', period=period, **kwargs)
    else:
        bls = lc.to_periodogram(method='bls', minimum_period=minimum_period,
                                maximum_period=maximum_period,
                                frequency_factor=frequency_factor)

    per = bls.period_at_max_power
    t0 = bls.transit_time_at_max_power
    dur = bls.duration_at_max_power

    results = {'period' : per.value, 't0' : t0, 'duration' : dur.value}
    
    #cleaning light curve if lc_clean flag
    if clean_lc:
        mask = bls.get_transit_mask(period=per, transit_time=t0,
                                    duration=(del_dur * dur))
        lc_clean = lc[mask]
        
    if clean_lc and not blsobj_out: 
        return results, lc_clean
    elif not clean_lc and blsobj_out:
        return results, bls
    elif not clean_lc and not blsobj_out:
        return results
    elif clean_lc and blsobj_out:
        return results, lc_clean, bls
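A sketch of how this search might be invoked on `time`, `flux` and `flux_err` arrays prepared elsewhere (the period bounds are arbitrary):

results, lc_clean = bls_search(time, flux, flux_err,
                               minimum_period=0.5, maximum_period=10,
                               clean_lc=True)
print(results['period'], results['t0'], results['duration'])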
Example #27
				m = nanmedian(flux)
				flux = 1e6*(flux/m - 1)
				flux_err = 1e6*flux_err/m

				#fig, ax = plt.subplots()
				#ax.plot(data[:,0], data[:,1])
				#fig.savefig(os.path.splitext(fpath_save)[0] + '.png', bbox_inches='tight')
				#plt.close(fig)

				# Save file:
				os.makedirs(os.path.dirname(fpath_save), exist_ok=True)
				np.savetxt(fpath_save, np.column_stack((time, flux, flux_err)),
					delimiter='  ', fmt=('%.8f', '%.16e', '%.16e'))

			# Calculate diagnostics:
			lc = LightCurve(time=time, flux=flux, flux_err=flux_err)
			variance = nanvar(flux, ddof=1)
			rms_hour = rms_timescale(lc, timescale=3600/86400)
			ptp = nanmedian(np.abs(np.diff(flux)))

			# Add target to TODO-list:
			diag.write("{variance:.16e},{rms_hour:.16e},{ptp:.16e}\n".format(
				variance=variance,
				rms_hour=rms_hour,
				ptp=ptp
			))

		diag.write("#-------------------------------------------\n")

	print("DONE")
def test_sff_corrector():
    """Does our code agree with the example presented in Vanderburg
    and Johnson (2014)?"""
    # The following csv file, provided by Vanderburg and Johnson
    # at https://www.cfa.harvard.edu/~avanderb/k2/ep60021426.html,
    # contains the results of applying SFF to EPIC 60021426.
    fn = get_pkg_data_filename("../../tests/data/ep60021426alldiagnostics.csv")
    data = np.genfromtxt(fn, delimiter=",", skip_header=1)
    mask = data[:, -2] == 0  # indicates whether the thrusters were on or off
    time = data[:, 0]
    raw_flux = data[:, 1]
    corrected_flux = data[:, 2]
    centroid_col = data[:, 3]
    centroid_row = data[:, 4]

    # NOTE: we need a small number of windows below because this test data set
    # is unusually short, i.e. has an unusually small number of cadences.
    lc = LightCurve(time=time,
                    flux=raw_flux,
                    flux_err=np.ones(len(raw_flux)) * 0.0001)
    sff = SFFCorrector(lc)
    corrected_lc = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        restore_trend=True,
        windows=1,
    )
    assert np.isclose(corrected_flux, corrected_lc.flux, atol=0.001).all()
    assert len(sff.window_points) == 0  # expect 0 break points for 1 window

    # masking
    corrected_lc = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
    )
    assert np.isclose(corrected_flux, corrected_lc.flux, atol=0.001).all()
    assert len(sff.window_points) == 2  # expect 2 break points for 3 windows

    # masking and breakindex
    corrected_lc = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
    )
    assert np.isclose(corrected_flux, corrected_lc.flux, atol=0.001).all()

    # masking and breakindex and iters
    corrected_lc = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
        niters=3,
    )
    assert np.isclose(corrected_flux, corrected_lc.flux, atol=0.001).all()

    # masking and breakindex and bins
    corrected_lc = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
        bins=5,
    )
    assert np.isclose(corrected_flux, corrected_lc.flux, atol=0.001).all()
    assert np.all((sff.lc.flux_err / sff.corrected_lc.flux_err) == 1)

    # masking and breakindex and bins and propagate_errors
    corrected_lc = sff.correct(
        centroid_col=centroid_col,
        centroid_row=centroid_row,
        windows=3,
        restore_trend=True,
        cadence_mask=mask,
        bins=5,
        propagate_errors=True,
    )
    assert np.isclose(corrected_flux, corrected_lc.flux, atol=0.001).all()
    assert np.all((sff.lc.flux_err / sff.corrected_lc.flux_err) < 1)

    # test using KeplerLightCurve interface
    klc = KeplerLightCurve(
        time=time,
        flux=raw_flux,
        flux_err=np.ones(len(raw_flux)) * 0.0001,
        centroid_col=centroid_col,
        centroid_row=centroid_row,
    )
    sff = klc.to_corrector("sff")
    klc = sff.correct(windows=3, restore_trend=True)
    assert np.isclose(corrected_flux, klc.flux, atol=0.001).all()

    # Can plot
    sff.diagnose()
Example #29
def full_batlc(period,
               rp,
               a,
               noise='obs',
               inlc=None,
               t0=None,
               sectors='all',
               cadence='2min',
               **kwargs):
    """
    Function to retrieve a full simulated BATMAN light curve complete with
    injected noise.

    !!Allow custom noise model!!
    !!Update name processing once name processing wrapper function written!!
    !!Utilize actual stellar params somehow?!!
    !!Search for known planets in the real system and pass those on as well!!
    !!Write docstrings!!
    !!Pass which TIC ID was used!!
    !!Allow for generation of gaussian noise!!
    !!Allow custom lc for inlc!!
    !!Raise error for noise='obs' and inlc=None!!

    Parameters
    ----------
    period : float
       Orbital period to be simulated. Should be in days.
    rp : float
       Radius of planet in units of host star radii. Essentially Rp/Rs.
    a : float
       Semi-major axis of the orbit in units of host star radii.
    noise : str or None
       Noise to be included in simulation. Current options are 'obs' for 
       injection into pre-existing light curve or None for a simulated light
       curve without noise. Gaussian noise is expected in the future.
    inlc : None or int
       TIC ID of light curve for simulations to be injected into. Can be set to
       None for a light curve to be randomly chosen from a pre-selected list in
       transit_tools/files/lc_list.csv if noise='obs'.
    t0 : float or None
       Mid-transit time of the first transit. If None, t0 will be randomly 
       generated to be somewhere within 1 period of the start of the 
       observation. Must be in the same units as period.
    sectors : str or array
       Sectors to be considered for retrieving light curves specified by inlc.
       If 'all', all available light curves for selected inlc will be 
       retrieved, otherwise only those contained in the user-provided array will
       be retrieved.
    cadence : str
       Cadence of TESS light curve to retrieve. Options are '2min', 'ffi_ml', or
       'eleanor'. This method is passed to transit_tools.fetch_lc.gather_lc and
       follows those hierarchy rules if a light curve at the given cadence
       cannot be found.
    kwargs
       Additional arguments to be passed to transit_tools.batman_transit.
    """
    tic = None

    if not inlc and noise == 'obs':
        #randomly choose lc to inject into from preselected list
        path = Path(__file__).parent / "./files/lc_list.csv"
        lc_list = np.genfromtxt(path, delimiter=',')
        lc_list = np.delete(lc_list, 0, 0)[:, 0]

        inlc = int(np.random.choice(lc_list))
        tic = inlc

    if isinstance(inlc, str):
        #perform name processing and set lc to TIC ID
        inlc = name_to_tic(str(inlc))
        tic = inlc

    if isinstance(inlc, int):
        #inject into chosen TIC ID using gather_lc
        tic = inlc
        inlc, sectors = gather_lc(inlc,
                                  sectors=sectors,
                                  return_sectors=True,
                                  method=cadence)

    if inlc is not None:
        #inject into chosen lc
        time = inlc.time
        flux = inlc.flux
        flux_err = None
        if hasattr(inlc, 'flux_err'):
            flux_err = inlc.flux_err

        if not t0:
            t0_range = time[time < (time[0] + period)]
            t0 = np.random.choice(t0_range)
        else:
            t0 = t0

        model = batman_transit(period=period,
                               rp=rp,
                               a=a,
                               t0=t0,
                               time=time,
                               **kwargs)

        params = model.params

        flux = flux + model.flux - 1

        inlc = LightCurve(time, flux, flux_err=flux_err)

        inlc.sectors = sectors
        inlc.params = params
        inlc.tic = tic

    elif noise is None:
        inlc = batman_transit(period=period, rp=rp, a=a, t0=t0, **kwargs)
        inlc.tic = None
        inlc.sectors = None

    inlc.known_pls = {
        'orbital_period': inlc.params.per,
        't0': inlc.params.t0,
        'rprs': inlc.params.rp,
        'a': inlc.params.a,
        'inc': inlc.params.inc,
        'ecc': inlc.params.ecc,
        'w': inlc.params.w,
        'u': inlc.params.u,
        'limb_dark': inlc.params.limb_dark,
        'pl_name': 'batman'
    }

    return inlc
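A noiseless example (the noise=None branch), assuming batman_transit from Example #25 is importable; parameters are arbitrary.

lc = full_batlc(period=2.5, rp=0.08, a=10.0, noise=None, t0=0.3)
print(lc.known_pls['orbital_period'], lc.known_pls['rprs'])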
Example #30
def tls_search(*args, tic=None, shape='default', star_params=None,
               rms_calc=True, norm_val=1., clean_lc=False,
               starparams_out=False,  del_dur=1., verbose=False, nthreads=6,
               **kwargs):
    #!!Change if flux_err is all nans to replace it with rms flux_err!!
    """
    Function to perform a search for periodic signals using the Transit Least 
    Squares (TLS) algorithm developed by Hippke & Heller 2018. While slower 
    than Box Least Squares, the transit shape used in the search is more 
    realistic.

    Parameters
    ----------
    *args : `lightkurve.LightCurve` object or multiple numpy array arguments
        If a single argument is passed, it is assumed to be a
        `lightkurve.LightCurve` object with at least two columns, labeled
        ``'time'`` and ``'flux'``, respectively, with an optional
        ``'flux_err'`` column as the third column. If more than one argument
        is passed, they are assumed to be ``time``, ``flux``, and ``flux_err``
        (optional), respectively. These columns or arguments should be arrays
        of equal length.
    tic : int or None
        TIC ID of the source that the light curve comes from. This will be 
        used to query the TIC for the stellar parameters of the system. May 
        be set to ``None`` if a full dictionary of stellar params are provided
        to the ``star_params`` keyword.
    shape : str
        The shape used by TLS to search for periodic signals. The user may 
        specify ``'default'``, ``'grazing'``, or ``'box'``. See Hippke & 
        Heller 2018 for an in-depth description of these shapes.
    star_params : dict or None
        A dictionary containing stellar parameters to be used in the TLS 
        search. The dictionary can contain an array of limb-darkening 
        parameters, stellar radius, lower radius error, upper radius error, 
        stellar mass, lower mass error, and upper mass error labeled ``'ab'``,
        ``'rstar'``, ``'rlow'``, ``'rhigh'``, ``'mstar'``, ``'mlow'``, and 
        ``'mhigh'``, respectively. The error values are the errors themselves 
        and not the upper and lower values for each of the parameters. A 
        partial list may be included, but in this case, the TIC must also be 
        given.
    rms_calc : bool
        A flag to denote whether the root mean square error will be applied 
        in the case that error values are not provided.
    norm_val : float
        Value that the light curve is normalized to. Default is 1. Only 1 or 
        0 are valid normalizations for TLS.
    clean_lc : bool
        Flag to indicate whether or not to output a cleaned lightcurve with 
        the recovered periodic signal masked out. Results in an additional 
        expected output. Default ``False``.
    starparams_out : bool
        Flag to indicate whether or not to output the dictionary of stellar
        parameters used in the TLS search. Results in an additional expected
        output.
    del_dur : float
        How many durations worth of data points should be excluded from 
        cleaned light curve centered on the transit center. Default is 1. 
        Values < 1 will result in some in-transit points remaining while 
        values > 1 will remove some points outside the transit.
    verbose : bool
        Flag to have function print more while it runs.
    nthreads : int
        Number of threads to be used for running the signal search. Many 
        times, cores have the capability to run multiple threads, so be sure 
        to check your machine to optimize this parameter.
    **kwargs : dict
        Optional arguments passed to the ``transitleastsquares.power`` 
        function.

    Returns
    -------
    results : dict
       Results of the TLS fit. See TLS documentation for the contents of this
       dictionary and descriptions of each element.
    cleaned_lc : ``lightkurve.LightCurve`` object, optional
       A light curve with the transits masked out based on the results of the 
       TLS search.
    """
    dy = None

    #processing inputs
    if not tic and (not star_params or len(star_params) != 7):
        raise ValueError('Either tic or full star_params dictionary must' +
                         ' be given!')

    if len(args) == 1:
        lc = args[0]
        time = lc.time
        flux = lc.flux
        try:
            if lc.flux_err is None:
                print('No flux errors provided')
            else:
                dy = lc.flux_err
        except:
            print('No flux errors provided')

    elif len(args) > 1:
        time = args[0]
        flux = args[1]
        if len(args) == 3:
            if args[2] is not None:
                dy = args[2]
            else:
                print('No flux_errors provided')
        else:
            print('No flux errors provided')
            
    if rms_calc and not isinstance(dy, np.ndarray):
        dy = np.ones(len(flux)) * rms(flux, norm_val=norm_val)
        print('RMS will be used for errors')

    #get catalog info and/or parse user-provided values
    if tic and (not star_params or len(star_params) != 7):
        print(f'Gathering stellar params that were not provided...', end='\r')

        ab, R_star, R_star_lowe, R_star_highe, M_star, M_star_lowe, M_star_highe = tls.catalog_info(TIC_ID=int(tic))
        cat = {'ab' : ab, 'rstar' : R_star, 'rlow' : R_star_lowe,
               'rhigh' : R_star_highe, 'mstar' : M_star, 'mlow' : M_star_lowe,
               'mhigh' : M_star_highe}
        
        if not star_params:
            star_params = {}

        missing = list(set(cat) - set(star_params))
        star_params.update({k: cat[k] for k in missing})
        
        print('Gathering stellar params that were not provided... Done!')

    #quality control for stellar params
    dc = star_params
        
    dc['rstar'] = 1.0 if math.isnan(dc['rstar']) else dc['rstar']
    dc['mstar'] = 1.0 if math.isnan(dc['mstar']) else dc['mstar']
    dc['mlow'] = 0.1 if math.isnan(dc['mlow']) else dc['mlow']
    dc['mhigh'] = 0.1 if math.isnan(dc['mhigh']) else dc['mhigh']
    dc['rlow'] = 0.1 if math.isnan(dc['rlow']) else dc['rlow']
    dc['rhigh'] = 0.1 if math.isnan(dc['rhigh']) else dc['rhigh']

    rmax = dc['rstar'] + dc['rhigh']
    rmin = dc['rstar'] - dc['rlow']
    mmax = dc['mstar'] + dc['mhigh']
    mmin = dc['mstar'] - dc['mlow']

    if verbose:
        print('Stellar params used:')
        for i in list(dc.keys()):
            print(str(i) + ' = ' + str(dc[i]))
        print('(defaults are solar and 0.1 for errors)')
            
    #beginning TLS search
    print('Searching using TLS using %s shape...' % shape)
    model = tls.transitleastsquares(t=time, y=flux, dy=dy)
    results = model.power(R_star=dc['rstar'], R_star_min=rmin, R_star_max=rmax,
                          M_star=dc['mstar'], M_star_min=mmin, M_star_max=mmax,
                          u=dc['ab'], transit_template=shape,
                          use_threads=nthreads, **kwargs)

    #cleaning light curve if clean_lc flag
    if clean_lc:
        intransit = tls.transit_mask(time, results.period,
                                     del_dur * results.duration, results.T0)
        time2 = time[~intransit]
        flux2 = flux[~intransit]
        dy2 = dy[~intransit]
        time2, flux2, dy2 = tls.cleaned_array(time2, flux2, dy2)

        lc_clean = LightCurve(time=time2, flux=flux2, flux_err=dy2)

    if clean_lc and not starparams_out:
        return results, lc_clean
    elif not clean_lc and starparams_out:
        return results, dc
    elif clean_lc and starparams_out:
        return results, lc_clean, dc
    else:
        return results
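A possible invocation on a `lightkurve.LightCurve` object prepared elsewhere; the TIC ID is a placeholder used only to query catalog stellar parameters.

results = tls_search(lc, tic=123456789, shape='default', verbose=True)
print(results.period, results.T0, results.duration)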