Example #1
def lc_detrend(time, flux, err, per, epoc, tdur):
    '''
    Function to flatten LCs. Removes transits and then fits a moving 
    average trend using a Savitzky-Golay filter - implemented using lightkurve
    '''

    flux_trend = np.copy(flux)
    phase_trend = (time - epoc) / per

    # Number of transits covered by the time series
    n_trans = int((time[-1] - epoc) / per + 1)
    for i in range(n_trans):
        # Points within one transit duration of the i-th transit centre
        in_trans = np.abs(phase_trend - i) < tdur / 24 / per
        trans = np.array((time[in_trans], flux[in_trans]))
        length = int(len(trans[0]) / 4)

        # Straight line through the first and last quarters of the window,
        # used to replace the in-transit flux before smoothing
        m = (np.mean(trans[1, -length:]) - np.mean(trans[1, :length])) / (
            np.mean(trans[0, -length:]) - np.mean(trans[0, :length]))
        c = np.mean(trans[1, :length]) - m * np.mean(trans[0, :length])

        flux_trend[in_trans] = m * trans[0] + c

    flat_lc, trend_lc = TessLightCurve(time,
                                       flux_trend).flatten(window_length=101,
                                                           return_trend=True)

    flux_flat = flux / trend_lc.flux
    err_flat = err / trend_lc.flux
    return flux_flat, err_flat, trend_lc.flux, flux_trend
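A minimal usage sketch for lc_detrend (hypothetical cadence, period, epoch and duration values; assumes numpy is imported as np and lightkurve's TessLightCurve is in scope, as the function itself requires):

import numpy as np

# Hypothetical inputs: ~27 days at 2-minute cadence with a slow sinusoidal trend
time = np.arange(0.0, 27.0, 2.0 / 60.0 / 24.0)
flux = 1.0 + 1e-3 * np.sin(2 * np.pi * time / 13.0)
err = np.full_like(time, 1e-4)

# per and epoc in days (same time system as `time`), tdur in hours
flux_flat, err_flat, trend, flux_trend = lc_detrend(
    time, flux, err, per=3.5, epoc=1.2, tdur=2.0)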
Example #2
def test_cbv_fit(INPUT_DIR):
    # Create CBV object:
    cbv = CBV(
        os.path.join(INPUT_DIR, 'cbv-prepare', 'cbv-s0001-c1800-a143.hdf5'))

    coeffs = [10, 500, 50, 100, 0, 10, 0]
    abs_flux = 3500

    # Create model using coefficients, and make fake lightcurve out of it:
    mdl = cbv.mdl(coeffs) * abs_flux

    # Another possible check: inject extreme weights (left disabled):
    #sigma = np.ones_like(mdl)*100
    #mdl[200] = 50000
    #sigma[200] = 1e-17

    lc = TessLightCurve(time=cbv.time, flux=mdl)

    # Run CBV fitting with fixed number of CBVs:
    flux_filter, res, diagnostics = cbv.fit(lc, cbvs=3, use_bic=False)

    # Plot:
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 16))
    ax1.scatter(cbv.time, mdl, alpha=0.3)
    ax1.plot(cbv.time, flux_filter, alpha=0.5, color='r')
    ax2.scatter(cbv.time, mdl - flux_filter)

    # Check the diagnostics dict:
    print(diagnostics)
    assert diagnostics['method'] == 'LS'
    assert not diagnostics['use_bic']
    assert not diagnostics['use_prior']

    # Check the coefficients coming out of the fit:
    # They should be the same as the ones we put in
    print(res - coeffs)
    np.testing.assert_allclose(res, coeffs, atol=0.5, rtol=0.5)

    # The fitted model should be very close to the model going in:
    np.testing.assert_allclose(mdl, flux_filter)
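The test pins cbvs=3 and disables the Bayesian Information Criterion; the use_bic flag suggests the same fit can instead select the number of CBVs automatically. A hypothetical variant of the call above, not exercised in this excerpt:

# Hypothetical: let the fit pick the number of CBVs via the BIC
flux_filter, res, diagnostics = cbv.fit(lc, use_bic=True)
assert diagnostics['use_bic']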
Example #3
    def __init__(self,
                 time=None,
                 flux=None,
                 flux_err=None,
                 time_format=None,
                 time_scale=None,
                 time_unit=None,
                 centroid_col=None,
                 centroid_row=None,
                 quality=None,
                 quality_bitmask=None,
                 channel=None,
                 campaign=None,
                 quarter=None,
                 sector=None,
                 mission=None,
                 cadenceno=None,
                 targetid=None,
                 ra=None,
                 dec=None,
                 label=None,
                 meta=None,
                 detrended_flux=None,
                 detrended_flux_err=None,
                 flux_trends=None,
                 gaps=None,
                 flares=None,
                 flux_unit=None,
                 primary_header=None,
                 data_header=None,
                 pos_corr1=None,
                 pos_corr2=None,
                 origin='FLC',
                 fake_flares=None,
                 it_med=None,
                 pixel_flux=None,
                 pixel_flux_err=None,
                 pipeline_mask=None,
                 camera=None,
                 ccd=None,
                 saturation=None):

        # Avoid the shared-mutable-default pitfall for ``meta``:
        if meta is None:
            meta = {}

        if mission == 'TESS':
            TessLightCurve.__init__(
                self,
                time=time,
                flux=flux,
                flux_err=flux_err,
                time_format=time_format,
                time_scale=time_scale,
                centroid_col=centroid_col,
                centroid_row=centroid_row,
                quality=quality,
                quality_bitmask=quality_bitmask,
                camera=camera,
                cadenceno=cadenceno,
                targetid=targetid,
                ra=ra,
                dec=dec,
                label=label,
                meta=meta,
                sector=sector,
            )
            self.mission = mission
            self.campaign = None
            self.quarter = None
        else:
            KeplerLightCurve.__init__(self,
                                      time=time,
                                      flux=flux,
                                      flux_err=flux_err,
                                      time_format=time_format,
                                      time_scale=time_scale,
                                      centroid_col=centroid_col,
                                      centroid_row=centroid_row,
                                      quality=quality,
                                      quality_bitmask=quality_bitmask,
                                      channel=channel,
                                      campaign=campaign,
                                      quarter=quarter,
                                      mission=mission,
                                      cadenceno=cadenceno,
                                      targetid=targetid,
                                      ra=ra,
                                      dec=dec,
                                      label=label,
                                      meta=meta)

        self.flux_unit = flux_unit
        self.time_unit = time_unit
        self.gaps = gaps
        self.flux_trends = flux_trends
        self.primary_header = primary_header
        self.data_header = data_header
        self.pos_corr1 = pos_corr1
        self.pos_corr2 = pos_corr2
        self.origin = origin
        self.pixel_flux = pixel_flux
        self.pixel_flux_err = pixel_flux_err
        self.pipeline_mask = pipeline_mask
        self.it_med = it_med

        columns = [
            'istart', 'istop', 'cstart', 'cstop', 'tstart', 'tstop', 'ed_rec',
            'ed_rec_err', 'ampl_rec', 'total_n_valid_data_points'
        ]

        if detrended_flux is None:
            self.detrended_flux = np.full_like(time, np.nan)
        else:
            self.detrended_flux = detrended_flux

        if detrended_flux_err is None:
            self.detrended_flux_err = np.full_like(time, np.nan)
        else:
            self.detrended_flux_err = detrended_flux_err

        if saturation is None:
            self.saturation = np.full_like(time, np.nan)
        else:
            self.saturation = saturation

        if flares is None:
            self.flares = pd.DataFrame(columns=columns)
        else:
            self.flares = flares

        if fake_flares is None:
            other_columns = ['duration_d', 'amplitude', 'ed_inj', 'peak_time']
            self.fake_flares = pd.DataFrame(columns=other_columns)
        else:
            self.fake_flares = fake_flares
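A minimal, hypothetical instantiation of the class whose constructor is shown above (the class name is not visible in this excerpt; origin='FLC' suggests a flare light curve subclass, assumed here to be called FlareLightCurve):

import numpy as np

# Hypothetical: build a TESS-flavoured instance and inspect the defaults
flc = FlareLightCurve(
    time=np.linspace(1325.0, 1352.0, 1000),
    flux=np.random.normal(1.0, 1e-3, 1000),
    flux_err=np.full(1000, 1e-3),
    mission='TESS',
    sector=1,
)
print(flc.flares)          # empty DataFrame with the flare columns
print(flc.detrended_flux)  # all NaN until a detrending step fills it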
Example #4
def test_sff_tess_warning():
    """SFF is not designed for TESS, so we raise a warning."""
    lc = TessLightCurve(flux=[1, 2, 3], meta={"MISSION": "TESS"})
    with pytest.warns(LightkurveWarning, match="not suitable"):
        corr = SFFCorrector(lc)
Example #5
def test_CBVCorrector():

    # Create a CBVCorrector without reading CBVs from MAST
    sample_lc = TessLightCurve(
        time=[1, 2, 3, 4, 5],
        flux=[1, 2, np.nan, 4, 5],
        flux_err=[0.1, 0.1, 0.1, 0.1, 0.1],
        cadenceno=[1, 2, 3, 4, 5],
        flux_unit=u.Unit("electron / second"),
    )

    cbvCorrector = CBVCorrector(sample_lc, do_not_load_cbvs=True)
    # Check that the NaN was removed
    assert len(cbvCorrector.lc.flux) == 4
    # Check that the median flux value is preserved
    assert_allclose(
        np.nanmedian(cbvCorrector.lc.flux).value,
        np.nanmedian(sample_lc.flux).value)

    dm = DesignMatrix(pd.DataFrame({"a": np.ones(4), "b": [1, 2, 4, 5]}))

    # ***
    # RegressionCorrector.correct passthrough method
    lc = cbvCorrector.correct_regressioncorrector(dm)
    # Check that a TessLightCurve is returned
    assert isinstance(lc, TessLightCurve)
    # The design matrix should absorb all variation, leaving the flux flat at its median
    lc_median = np.nanmedian(lc.flux)
    assert_allclose(lc.flux, lc_median)

    # ***
    # Gaussian Prior fit
    lc = cbvCorrector.correct_gaussian_prior(cbv_type=None,
                                             cbv_indices=None,
                                             alpha=1e-9,
                                             ext_dm=dm)
    assert isinstance(lc, TessLightCurve)
    # Check that returned lc is in absolute flux units
    assert lc.flux.unit == u.Unit("electron / second")
    # The design matrix should absorb all variation, leaving the flux flat at its median
    lc_median = np.nanmedian(lc.flux)
    assert_allclose(lc.flux, lc_median)
    ax = cbvCorrector.diagnose()
    assert len(ax) == 2 and isinstance(ax[0], matplotlib.axes.Axes)

    # Now add a strong regularization term and under-fit the data
    lc = cbvCorrector.correct_gaussian_prior(cbv_type=None,
                                             cbv_indices=None,
                                             alpha=1e9,
                                             ext_dm=dm)
    # There should be virtually no change in the flux
    assert_allclose(lc.flux, sample_lc.remove_nans().flux)

    # This should error because the dm has incorrect number of cadences
    dm_err = DesignMatrix(pd.DataFrame({
        "a": np.ones(5),
        "b": [1, 2, 4, 5, 6]
    }))
    with pytest.raises(ValueError):
        lc = cbvCorrector.correct_gaussian_prior(cbv_type=None,
                                                 cbv_indices=None,
                                                 alpha=1e-2,
                                                 ext_dm=dm_err)

    # ***
    # ElasticNet fit
    lc = cbvCorrector.correct_elasticnet(cbv_type=None,
                                         cbv_indices=None,
                                         alpha=1e-20,
                                         l1_ratio=0.5,
                                         ext_dm=dm)
    assert isinstance(lc, TessLightCurve)
    assert lc.flux.unit == u.Unit("electron / second")
    # The design matrix should absorb all variation, leaving the flux flat at its median
    lc_median = np.nanmedian(lc.flux)
    assert_allclose(lc.flux, lc_median, rtol=1e-3)
    ax = cbvCorrector.diagnose()
    assert len(ax) == 2 and isinstance(ax[0], matplotlib.axes.Axes)
    # Now add a strong regularization term and under-fit the data
    lc = cbvCorrector.correct_elasticnet(cbv_type=None,
                                         cbv_indices=None,
                                         alpha=1e9,
                                         l1_ratio=0.5,
                                         ext_dm=dm)
    # There should be virtually no change in the flux
    assert_allclose(lc.flux, sample_lc.remove_nans().flux)

    # ***
    # Correction optimizer
    # The optimizer cannot be run without downloading targets from MAST for use
    # within the under-fitting metric.
    # So let's just verify it fails as expected (not much else we can do)
    dm_err = DesignMatrix(pd.DataFrame({
        "a": np.ones(5),
        "b": [1, 2, 4, 5, 6]
    }))
    with pytest.raises(ValueError):
        lc = cbvCorrector.correct(
            cbv_type=None,
            cbv_indices=None,
            alpha_bounds=[1e-4, 1e4],
            ext_dm=dm_err,
            target_over_score=0.5,
            target_under_score=0.8,
        )
Example #6
def test_CotrendingBasisVectors_nonretrieval():
    """Tests CotrendingBasisVectors class without requiring remote data"""

    # ***
    # Constructor
    # Create some generic CotrendingBasisVectors objects

    # Generic CotrendingBasisVectors object
    dataTbl = Table(
        [[1, 2, 3], [False, True, False], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]],
        names=("CADENCENO", "GAP", "VECTOR_1", "VECTOR_3"),
    )
    cbvTime = Time([443.51090033, 443.53133457, 443.55176891], format="bkjd")
    cbvs = CotrendingBasisVectors(data=dataTbl, time=cbvTime)
    assert isinstance(cbvs, CotrendingBasisVectors)
    assert cbvs.cbv_indices == [1, 3]
    assert np.all(
        cbvs.time.value == [443.51090033, 443.53133457, 443.55176891])

    # Auto-initialize 'GAP' and 'CADENCENO'
    dataTbl = Table([[2.0, 3.0, 4.0], [3.0, 4.0, 5.0]],
                    names=("VECTOR_3", "VECTOR_12"))
    cbvTime = Time([443.51090033, 443.53133457, 443.55176891], format="bkjd")
    cbvs = CotrendingBasisVectors(data=dataTbl, time=cbvTime)
    assert isinstance(cbvs, CotrendingBasisVectors)
    assert cbvs.cbv_indices == [3, 12]
    assert np.all(cbvs.gap_indicators == [False, False, False])
    assert np.all(cbvs.cadenceno == [0, 1, 2])

    # ***
    # _to_designmatrix
    # Make sure CBVs are the columns in the returned 2-dim array
    dataTbl = Table(
        [
            [1, 2, 3],
            [False, True, False],
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [7.0, 8.0, 9.0],
        ],
        names=("CADENCENO", "GAP", "VECTOR_1", "VECTOR_2", "VECTOR_3"),
    )
    cbvTime = Time([1569.44053967, 1569.44192856, 1569.44331746],
                   format="btjd")
    cbvs = CotrendingBasisVectors(dataTbl, cbvTime)
    cbv_dm_name = "test cbv set"
    # CBV index 5 does not exist and should be ignored
    cbv_designmatrix = cbvs.to_designmatrix(cbv_indices=[1, 3, 5],
                                            name=cbv_dm_name)
    assert cbv_designmatrix.shape == (3, 2)
    assert np.all(cbv_designmatrix["VECTOR_1"] == np.array([1.0, 2.0, 3.0]))
    assert np.all(cbv_designmatrix["VECTOR_3"] == np.array([7.0, 8.0, 9.0]))
    assert cbv_designmatrix.name == cbv_dm_name
    # CBV #2 was not requested, so make sure it is not present
    with pytest.raises(KeyError):
        cbv_designmatrix["VECTOR_2"]

    # ***
    # plot
    ax = cbvs.plot(cbv_indices=[1, 2], ax=None)
    assert isinstance(ax, matplotlib.axes.Axes)

    # There is no CBV #5, so the third cbv_indices entry will be ignored
    ax = cbvs.plot(cbv_indices=[1, 2, 5], ax=ax)
    assert isinstance(ax, matplotlib.axes.Axes)

    # CBVs use 1-based indexing. Throw error if requesting CBV index 0
    with pytest.raises(ValueError):
        ax = cbvs.plot(cbv_indices=[0, 1, 2], ax=ax)

    # Only 'all' or specific CBV indices can be requested
    with pytest.raises(ValueError):
        ax = cbvs.plot("Doh!")

    # ***
    # align
    # Set up cadence numbers such that the CBV is both trimmed and has NaNs inserted
    sample_lc = TessLightCurve(
        time=[1, 2, 3, 4, 6, 7],
        flux=[1, 2, 3, 4, 6, 7],
        flux_err=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
        cadenceno=[1, 2, 3, 4, 6, 7],
    )
    dataTbl = Table(
        [
            [1, 2, 3, 5, 6],
            [False, True, False, False, False],
            [1.0, 2.0, 3.0, 5.0, 6.0],
        ],
        names=("CADENCENO", "GAP", "VECTOR_1"),
    )
    cbvTime = Time(
        [
            1569.43915078, 1569.44053967, 1569.44192856, 1569.44470635,
            1569.44609524
        ],
        format="btjd",
    )
    cbvs = CotrendingBasisVectors(dataTbl, cbvTime)
    cbvs = cbvs.align(sample_lc)
    assert np.all(sample_lc.cadenceno == cbvs.cadenceno)
    assert len(cbvs.cadenceno) == 6
    assert len(sample_lc.flux) == 6
    assert np.all(cbvs.gap_indicators.value[[1, 3, 5]])
    # Ignore the warning in to_designmatrix due to a low-rank matrix
    with warnings.catch_warnings():
        # Instantiating light curves with NaN times will yield a warning
        warnings.simplefilter("ignore", LightkurveWarning)
        cbv_designmatrix = cbvs.to_designmatrix(cbv_indices=[1])
    assert np.all(
        cbv_designmatrix["VECTOR_1"][[0, 1, 2, 4]] == [1.0, 2.0, 3.0, 6.0])
    assert np.all(np.isnan(cbv_designmatrix["VECTOR_1"][[3, 5]]))

    # ***
    # interpolate
    nLcCadences = 20
    xLc = np.linspace(0.0, 2 * np.pi, num=nLcCadences)
    sample_lc = TessLightCurve(
        time=xLc,
        flux=np.sin(xLc),
        flux_err=np.full(nLcCadences, 0.1),
        cadenceno=np.arange(nLcCadences),
    )
    nCbvCadences = 10
    xCbv = np.linspace(0.0, 2 * np.pi, num=nCbvCadences)
    dataTbl = Table(
        [
            np.arange(nCbvCadences),
            np.full(nCbvCadences, False),
            np.cos(xCbv),
            np.sin(xCbv + np.pi * 0.125),
        ],
        names=("CADENCENO", "GAP", "VECTOR_1", "VECTOR_2"),
    )
    cbvTime = Time(xCbv, format="btjd")
    cbvs = CotrendingBasisVectors(dataTbl, cbvTime)
    cbv_interpolated = cbvs.interpolate(sample_lc, extrapolate=False)
    assert np.all(cbv_interpolated.time.value == sample_lc.time.value)
    # Extrapolation test
    # If extrapolate=False then all outside values set to 0.0
    xCbv = np.linspace(0.0, 1.5 * np.pi, num=nCbvCadences)
    dataTbl = Table(
        [
            np.arange(nCbvCadences),
            np.full(nCbvCadences, False),
            np.cos(xCbv),
            np.sin(xCbv + np.pi * 0.125),
        ],
        names=("CADENCENO", "GAP", "VECTOR_1", "VECTOR_2"),
    )
    cbvTime = Time(xCbv, format="btjd")
    cbvs = CotrendingBasisVectors(dataTbl, cbvTime)
    cbv_interpolated = cbvs.interpolate(sample_lc, extrapolate=False)
    assert np.all(cbv_interpolated["VECTOR_1"].value[np.nonzero(
        cbv_interpolated.time.value > 1.5 * np.pi)[0]] == 0.0)
    # extrapolate
    cbv_interpolated = cbvs.interpolate(sample_lc, extrapolate=True)
    assert np.all(cbv_interpolated["VECTOR_1"].value[np.nonzero(
        cbv_interpolated.time.value > 1.5 * np.pi)[0]] != 0.0)
Example #7
    def load_lightcurve(self, task):
        """
		Load lightcurve from task ID or full task dictionary.

		Parameters:
			task (integer or dict): Task ID or the full task dictionary from the TODO-list.

		Returns:
			:class:`lightkurve.TessLightCurve`: Lightcurve for the star in question.

		Raises:
			ValueError: On invalid file format.

		.. codeauthor:: Rasmus Handberg <*****@*****.**>
		"""

        logger = logging.getLogger(__name__)

        # Find the relevant information in the TODO-list:
        if not isinstance(task, dict) or task.get("lightcurve") is None:
            if isinstance(task, dict):
                priority = int(task['priority'])
            else:
                priority = int(task)

            self.cursor.execute(
                "SELECT * FROM todolist INNER JOIN diagnostics ON todolist.priority=diagnostics.priority WHERE todolist.priority=? LIMIT 1;",
                (priority, ))
            task = self.cursor.fetchone()
            if task is None:
                raise ValueError(
                    "Priority could not be found in the TODO list")
            task = dict(task)

        # Get the path of the FITS file:
        fname = os.path.join(self.input_folder, task.get('lightcurve'))
        logger.debug('Loading lightcurve: %s', fname)

        # Load lightcurve file and create a TessLightCurve object:
        if fname.endswith(('.fits.gz', '.fits')):
            with fits.open(fname, mode='readonly', memmap=True) as hdu:
                # Filter out invalid parts of the input lightcurve:
                hdu = _filter_fits_hdu(hdu)

                # Quality flags from the pixels:
                pixel_quality = np.asarray(
                    hdu['LIGHTCURVE'].data['PIXEL_QUALITY'], dtype='int32')

                # Corrections applied to timestamps:
                timecorr = hdu['LIGHTCURVE'].data['TIMECORR']

                # Create the QUALITY column and fill it with flags of bad data points:
                quality = np.zeros_like(hdu['LIGHTCURVE'].data['TIME'],
                                        dtype='int32')
                bad_data = ~np.isfinite(hdu['LIGHTCURVE'].data['FLUX_RAW'])
                bad_data |= (pixel_quality & TESSQualityFlags.DEFAULT_BITMASK
                             != 0)
                quality[bad_data] |= CorrectorQualityFlags.FlaggedBadData

                # Create lightkurve object:
                lc = TessLightCurve(
                    time=hdu['LIGHTCURVE'].data['TIME'],
                    flux=hdu['LIGHTCURVE'].data['FLUX_RAW'],
                    flux_err=hdu['LIGHTCURVE'].data['FLUX_RAW_ERR'],
                    centroid_col=hdu['LIGHTCURVE'].data['MOM_CENTR1'],
                    centroid_row=hdu['LIGHTCURVE'].data['MOM_CENTR2'],
                    quality=quality,
                    cadenceno=np.asarray(hdu['LIGHTCURVE'].data['CADENCENO'],
                                         dtype='int32'),
                    time_format='btjd',
                    time_scale='tdb',
                    targetid=hdu[0].header.get('TICID'),
                    label=hdu[0].header.get('OBJECT'),
                    camera=hdu[0].header.get('CAMERA'),
                    ccd=hdu[0].header.get('CCD'),
                    sector=hdu[0].header.get('SECTOR'),
                    ra=hdu[0].header.get('RA_OBJ'),
                    dec=hdu[0].header.get('DEC_OBJ'),
                    quality_bitmask=CorrectorQualityFlags.DEFAULT_BITMASK,
                    meta={'data_rel': hdu[0].header.get('DATA_REL')})

                # Apply manual exclude flag:
                manexcl = manual_exclude(lc)
                lc.quality[manexcl] |= CorrectorQualityFlags.ManualExclude

        elif fname.endswith(('.noisy', '.sysnoise')):  # pragma: no cover
            data = np.loadtxt(fname)

            # Quality flags from the pixels:
            pixel_quality = np.asarray(data[:, 3], dtype='int32')

            # Corrections applied to timestamps:
            timecorr = np.zeros(data.shape[0], dtype='float32')

            # Change the Manual Exclude flag, since the simulated data
            # and the real TESS quality flags differ in the definition:
            indx = (pixel_quality & 256 != 0)
            pixel_quality[indx] -= 256
            pixel_quality[indx] |= TESSQualityFlags.ManualExclude

            # Create the QUALITY column and fill it with flags of bad data points:
            quality = np.zeros(data.shape[0], dtype='int32')
            bad_data = ~np.isfinite(data[:, 1])
            bad_data |= (pixel_quality & TESSQualityFlags.DEFAULT_BITMASK != 0)
            quality[bad_data] |= CorrectorQualityFlags.FlaggedBadData

            # Create lightkurve object:
            lc = TessLightCurve(
                time=data[:, 0],
                flux=data[:, 1],
                flux_err=data[:, 2],
                quality=quality,
                cadenceno=np.arange(1, data.shape[0] + 1, dtype='int32'),
                time_format='jd',
                time_scale='tdb',
                targetid=task['starid'],
                label="Star%d" % task['starid'],
                camera=task['camera'],
                ccd=task['ccd'],
                sector=2,
                #ra=0,
                #dec=0,
                quality_bitmask=CorrectorQualityFlags.DEFAULT_BITMASK,
                meta={})

        else:
            raise ValueError("Invalid file format")

        # Add additional attributes to lightcurve object:
        lc.pixel_quality = pixel_quality
        lc.timecorr = timecorr

        # Modify the "extra_columns" tuple of the lightkurve object:
        # This is used internally in lightkurve to keep track of the columns in the
        # object, and make sure they are propagated.
        lc.extra_columns = tuple(
            list(lc.extra_columns) + ['timecorr', 'pixel_quality'])

        # Keep the original task in the metadata:
        lc.meta['task'] = task
        lc.meta['additional_headers'] = fits.Header()

        if logger.isEnabledFor(logging.DEBUG):
            with contextlib.redirect_stdout(LoggerWriter(
                    logger, logging.DEBUG)):
                lc.show_properties()

        return lc
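The QUALITY handling above is a standard bitmask pattern. A self-contained sketch of the same idea, with stand-in flag values rather than the actual TESSQualityFlags/CorrectorQualityFlags constants:

import numpy as np

DEFAULT_BITMASK = 0b0101   # stand-in: bits considered "bad" upstream
FLAGGED_BAD_DATA = 1 << 7  # stand-in: bit set by the corrector

flux = np.array([1.0, np.nan, 3.0, 4.0])
pixel_quality = np.array([0, 0, 0b0100, 0], dtype='int32')

quality = np.zeros_like(flux, dtype='int32')
bad_data = ~np.isfinite(flux)                       # non-finite flux
bad_data |= (pixel_quality & DEFAULT_BITMASK) != 0  # upstream-flagged cadences
quality[bad_data] |= FLAGGED_BAD_DATA

print(quality)  # [  0 128 128   0]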
Example #8
 def vetting_validation(self, cpus, indir, tic, sectors, lc_file,
                        transit_depth, period, t0, transit_duration):
     """ Calculates probabilities of the signal being caused by any of the following astrophysical sources:
     TP No unresolved companion. Transiting planet with Porb around target star. (i, Rp)
     EB No unresolved companion. Eclipsing binary with Porb around target star. (i, qshort)
     EBx2P No unresolved companion. Eclipsing binary with 2 × Porb around target star. (i, qshort)
     PTP Unresolved bound companion. Transiting planet with Porb around primary star. (i, Rp, qlong)
     PEB Unresolved bound companion. Eclipsing binary with Porb around primary star. (i, qshort, qlong)
     PEBx2P Unresolved bound companion. Eclipsing binary with 2 × Porb around primary star. (i, qshort, qlong)
     STP Unresolved bound companion. Transiting planet with Porb around secondary star. (i, Rp, qlong)
     SEB Unresolved bound companion. Eclipsing binary with Porb around secondary star. (i, qshort, qlong)
     SEBx2P Unresolved bound companion. Eclipsing binary with 2 × Porb around secondary star. (i, qshort, qlong)
     DTP Unresolved background star. Transiting planet with Porb around target star. (i, Rp, simulated star)
     DEB Unresolved background star. Eclipsing binary with Porb around target star. (i, qshort, simulated star)
     DEBx2P Unresolved background star. Eclipsing binary with 2 × Porb around target star. (i, qshort, simulated star)
     BTP Unresolved background star. Transiting planet with Porb around background star. (i, Rp, simulated star)
     BEB Unresolved background star. Eclipsing binary with Porb around background star. (i, qshort, simulated star)
     BEBx2P Unresolved background star. Eclipsing binary with 2 × Porb around background star. (i, qshort, simulated star)
     NTP No unresolved companion. Transiting planet with Porb around nearby star. (i, Rp)
     NEB No unresolved companion. Eclipsing binary with Porb around nearby star. (i, qshort)
     NEBx2P No unresolved companion. Eclipsing binary with 2 × Porb around nearby star. (i, qshort)
     FPP = 1 - (TP + PTP + DTP)
     NFPP = NTP + NEB + NEBx2P
     Giacalone & Dressing (2020) define validated planets as TOIs with NFPP < 10⁻³ and FPP < 0.015 (or FPP ≤ 0.01,
     when rounding to the nearest percent)
     @param cpus: number of cpus to be used
     @param indir: root directory to store the results
     @param tic: the tess object id for which the analysis will be run
     @param sectors: the sectors of the tic
     @param lc_file: the light curve source file
     @param transit_depth: the depth of the transit signal (ppts)
     @param period: the period of the transit signal (days)
     @param t0: the t0 of the transit signal (days)
     @param transit_duration: the duration of the transit signal (minutes)
     """
     save_dir = indir + "/" + str(tic) + "/triceratops_" + str(uuid.uuid4())
     if os.path.exists(save_dir):
         shutil.rmtree(save_dir, ignore_errors=True)
     if not os.path.exists(save_dir):
         os.makedirs(save_dir)
     sectors = np.array(sectors)
     duration = transit_duration / 60 / 24
     target = tr.target(ID=tic, sectors=sectors)
     # TODO allow user input apertures
     tpfs = lightkurve.search_targetpixelfile("TIC " + str(tic), mission="TESS", cadence="short", sector=sectors.tolist())\
         .download_all()
     star = eleanor.multi_sectors(tic=tic, sectors=sectors, tesscut_size=31)
     apertures = []
     sector_num = 0
     for s in star:
         tpf_idx = [
             data.sector if data.sector == s.sector else -1
             for data in tpfs.data
         ]
          # Pick the TPF matching this sector (tpf_idx holds the sector
          # number where it matched, -1 otherwise)
          tpf = tpfs[np.where(np.array(tpf_idx) > 0)[0][0]]
          pipeline_mask = np.transpose(tpf.pipeline_mask)
          pipeline_mask_triceratops = np.zeros(
              (pipeline_mask.shape[0], pipeline_mask.shape[1], 2))
          for i in range(pipeline_mask.shape[0]):
              for j in range(pipeline_mask.shape[1]):
                 pipeline_mask_triceratops[i, j] = [
                     tpf.column + i, tpf.row + j
                 ]
          # Pixels outside the pipeline aperture become NaN:
          pipeline_mask_triceratops[~pipeline_mask] = None
         aperture = []
          for i in range(pipeline_mask_triceratops.shape[0]):
              for j in range(pipeline_mask_triceratops.shape[1]):
                 if not np.isnan(pipeline_mask_triceratops[i, j]).any():
                     aperture.append(pipeline_mask_triceratops[i, j])
         apertures.append(aperture)
         target.plot_field(save=True,
                           fname=save_dir + "/field_S" + str(s.sector),
                           sector=s.sector,
                           ap_pixels=aperture)
         sector_num = sector_num + 1
     apertures = np.array(apertures)
     depth = transit_depth / 1000
     target.calc_depths(depth, apertures)
     target.stars.to_csv(save_dir + "/stars.csv", index=False)
     lc = pd.read_csv(lc_file, header=0)
     time, flux, flux_err = lc["#time"].values, lc["flux"].values, lc[
         "flux_err"].values
     lc_len = len(time)
     zeros_lc = np.zeros(lc_len)
     lc = TessLightCurve(time=time,
                         flux=flux,
                         flux_err=flux_err,
                         quality=zeros_lc)
     lc.extra_columns = []
     lc = lc.fold(period=period, epoch_time=t0, normalize_phase=True)
     folded_plot_range = duration / 2 / period * 5
     inner_folded_range_args = np.where(
         (0 - folded_plot_range < lc.time.value)
         & (lc.time.value < 0 + folded_plot_range))
     lc = lc[inner_folded_range_args]
     lc.time = lc.time * period
     sigma = np.mean(lc.flux_err)
     input_n_times = [
         ValidatorInput(save_dir, copy.deepcopy(target), lc.time.value,
                        lc.flux.value, sigma, period, depth, apertures,
                        value) for value in range(0, self.validation_runs)
     ]
     validator = Validator()
     with Pool(processes=cpus) as pool:
         validation_results = pool.map(validator.validate, input_n_times)
     fpp_sum = 0
     nfpp_sum = 0
     probs_total_df = None
     scenarios_num = len(validation_results[0][2])
      star_num = np.zeros((self.validation_runs, scenarios_num))
      u1 = np.zeros((self.validation_runs, scenarios_num))
      u2 = np.zeros((self.validation_runs, scenarios_num))
      fluxratio_EB = np.zeros((self.validation_runs, scenarios_num))
      fluxratio_comp = np.zeros((self.validation_runs, scenarios_num))
     target = input_n_times[0].target
     target.star_num = np.zeros(scenarios_num)
     target.u1 = np.zeros(scenarios_num)
     target.u2 = np.zeros(scenarios_num)
     target.fluxratio_EB = np.zeros(scenarios_num)
     target.fluxratio_comp = np.zeros(scenarios_num)
     i = 0
     for fpp, nfpp, probs_df, star_num_arr, u1_arr, u2_arr, fluxratio_EB_arr, fluxratio_comp_arr in validation_results:
         if probs_total_df is None:
             probs_total_df = probs_df
         else:
             probs_total_df = pd.concat((probs_total_df, probs_df))
         fpp_sum = fpp_sum + fpp
         nfpp_sum = nfpp_sum + nfpp
         star_num[i] = star_num_arr
         u1[i] = u1_arr
         u2[i] = u2_arr
         fluxratio_EB[i] = fluxratio_EB_arr
         fluxratio_comp[i] = fluxratio_comp_arr
         i = i + 1
     for i in range(0, scenarios_num):
         target.star_num[i] = np.mean(star_num[:, i])
         target.u1[i] = np.mean(u1[:, i])
         target.u2[i] = np.mean(u2[:, i])
         target.fluxratio_EB[i] = np.mean(fluxratio_EB[:, i])
         target.fluxratio_comp[i] = np.mean(fluxratio_comp[:, i])
     with open(save_dir + "/validation.csv", 'w') as the_file:
         the_file.write("FPP,NFPP\n")
         the_file.write(
             str(fpp_sum / self.validation_runs) + "," +
             str(nfpp_sum / self.validation_runs))
     probs_total_df = probs_total_df.groupby("scenario",
                                             as_index=False).mean()
     probs_total_df["scenario"] = pd.Categorical(
         probs_total_df["scenario"], [
             "TP", "EB", "EBx2P", "PTP", "PEB", "PEBx2P", "STP", "SEB",
             "SEBx2P", "DTP", "DEB", "DEBx2P", "BTP", "BEB", "BEBx2P",
             "NTP", "NEB", "NEBx2P"
         ])
     probs_total_df = probs_total_df.sort_values("scenario")
     probs_total_df.to_csv(save_dir + "/validation_scenarios.csv",
                           index=False)
     target.probs = probs_total_df
     # target.plot_fits(save=True, fname=save_dir + "/scenario_fits", time=lc.time.value, flux_0=lc.flux.value,
     #                  sigma_0=sigma)
     return save_dir
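The FPP/NFPP bookkeeping in the loop above reduces to averaging the per-run probabilities and comparing against the thresholds quoted in the docstring. In miniature, with made-up numbers:

# Made-up per-run (fpp, nfpp) pairs standing in for validation_results
runs = [(0.012, 0.0005), (0.010, 0.0007), (0.014, 0.0004)]

fpp = sum(f for f, _ in runs) / len(runs)
nfpp = sum(n for _, n in runs) / len(runs)

# Thresholds from the docstring: NFPP < 10^-3 and FPP < 0.015
validated = (nfpp < 1e-3) and (fpp < 0.015)
print(fpp, nfpp, validated)  # 0.012 0.000533... True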
Example #9
 def execute_triceratops(cpus, indir, object_id, sectors, lc_file,
                         transit_depth, period, t0, transit_duration,
                         rp_rstar, a_rstar, bins, scenarios, sigma_mode,
                         contrast_curve_file):
     """ Calculates probabilities of the signal being caused by any of the following astrophysical sources:
     TP No unresolved companion. Transiting planet with Porb around target star. (i, Rp)
     EB No unresolved companion. Eclipsing binary with Porb around target star. (i, qshort)
     EBx2P No unresolved companion. Eclipsing binary with 2 × Porb around target star. (i, qshort)
     PTP Unresolved bound companion. Transiting planet with Porb around primary star. (i, Rp, qlong)
     PEB Unresolved bound companion. Eclipsing binary with Porb around primary star. (i, qshort, qlong)
     PEBx2P Unresolved bound companion. Eclipsing binary with 2 × Porb around primary star. (i, qshort, qlong)
     STP Unresolved bound companion. Transiting planet with Porb around secondary star. (i, Rp, qlong)
     SEB Unresolved bound companion. Eclipsing binary with Porb around secondary star. (i, qshort, qlong)
     SEBx2P Unresolved bound companion. Eclipsing binary with 2 × Porb around secondary star. (i, qshort, qlong)
     DTP Unresolved background star. Transiting planet with Porb around target star. (i, Rp, simulated star)
     DEB Unresolved background star. Eclipsing binary with Porb around target star. (i, qshort, simulated star)
     DEBx2P Unresolved background star. Eclipsing binary with 2 × Porb around target star. (i, qshort, simulated star)
     BTP Unresolved background star. Transiting planet with Porb around background star. (i, Rp, simulated star)
     BEB Unresolved background star. Eclipsing binary with Porb around background star. (i, qshort, simulated star)
     BEBx2P Unresolved background star. Eclipsing binary with 2 × Porb around background star. (i, qshort, simulated star)
     NTP No unresolved companion. Transiting planet with Porb around nearby star. (i, Rp)
     NEB No unresolved companion. Eclipsing binary with Porb around nearby star. (i, qshort)
     NEBx2P No unresolved companion. Eclipsing binary with 2 × Porb around nearby star. (i, qshort)
     FPP = 1 - (TP + PTP + DTP)
     NFPP = NTP + NEB + NEBx2P
      Giacalone & Dressing (2020) define validated planets as TOIs with NFPP < 10⁻³ and FPP < 0.015 (or FPP ≤ 0.01,
     when rounding to the nearest percent)
     @param cpus: number of cpus to be used
     @param indir: root directory to store the results
      @param object_id: the object id for which the analysis will be run
     @param sectors: the sectors of the tic
     @param lc_file: the light curve source file
     @param transit_depth: the depth of the transit signal (ppts)
      @param period: the period of the transit signal (days)
     @param t0: the t0 of the transit signal (days)
     @param transit_duration: the duration of the transit signal (minutes)
     @param rp_rstar: radius of planet divided by radius of star
     @param a_rstar: semimajor axis divided by radius of star
     @param bins: the number of bins to average the folded curve
     @param scenarios: the number of scenarios to validate
     @param sigma_mode: the way to calculate the sigma for the validation ['flux_err' | 'binning']
     @param contrast_curve_file: the auxiliary contrast curve file to give more information to the validation engine.
     """
     save_dir = indir + "/triceratops"
     if os.path.exists(save_dir):
         shutil.rmtree(save_dir, ignore_errors=True)
     if not os.path.exists(save_dir):
         os.makedirs(save_dir)
     duration = transit_duration / 60 / 24
     logging.info("----------------------")
     logging.info("Validation procedures")
     logging.info("----------------------")
     logging.info("Pre-processing sectors")
     mission, mission_prefix, id_int = LcBuilder().parse_object_info(
         object_id)
     if mission == "TESS":
         sectors = np.array(sectors)
         sectors_cut = TesscutClass().get_sectors("TIC " + str(id_int))
         sectors_cut = np.array(
             [sector_row["sector"] for sector_row in sectors_cut])
         if len(sectors) != len(sectors_cut):
             logging.warning("WARN: Some sectors were not found in TESSCUT")
             logging.warning("WARN: Sherlock sectors were: " + str(sectors))
             logging.warning("WARN: TESSCUT sectors were: " +
                             str(sectors_cut))
         sectors = np.intersect1d(sectors, sectors_cut)
         if len(sectors) == 0:
             logging.warning(
                 "There are no available sectors to be validated, skipping TRICERATOPS."
             )
             return save_dir, None, None
     logging.info("Will execute validation for sectors: " + str(sectors))
     logging.info("Acquiring triceratops target")
     target = tr.target(ID=id_int, mission=mission, sectors=sectors)
     # TODO allow user input apertures
     logging.info("Reading apertures from directory")
     apertures = yaml.load(open(object_dir + "/apertures.yaml"),
                           yaml.SafeLoader)
     apertures = apertures["sectors"]
     valid_apertures = {}
     for sector, aperture in apertures.items():
         if sector in sectors:
             valid_apertures[sector] = aperture
             target.plot_field(save=True,
                               fname=save_dir + "/field_S" + str(sector),
                               sector=sector,
                               ap_pixels=aperture)
     apertures = np.array(
         [aperture for sector, aperture in apertures.items()])
     valid_apertures = np.array(
         [aperture for sector, aperture in valid_apertures.items()])
     depth = transit_depth / 1000
     if contrast_curve_file is not None:
         logging.info("Reading contrast curve %s", contrast_curve_file)
         plt.clf()
         cc = pd.read_csv(contrast_curve_file, header=None)
         sep, dmag = cc[0].values, cc[1].values
         plt.plot(sep, dmag, 'k-')
         plt.ylim(9, 0)
         plt.ylabel("$\\Delta K_s$", fontsize=20)
         plt.xlabel("separation ('')", fontsize=20)
         plt.savefig(save_dir + "/contrast_curve.png")
         plt.clf()
     logging.info("Calculating validation closest stars depths")
     target.calc_depths(depth, valid_apertures)
     target.stars.to_csv(save_dir + "/stars.csv", index=False)
     lc = pd.read_csv(lc_file, header=0)
     time, flux, flux_err = lc["#time"].values, lc["flux"].values, lc[
         "flux_err"].values
     lc_len = len(time)
     zeros_lc = np.zeros(lc_len)
     logging.info("Preparing validation light curve for target")
     if mission == "TESS":
         lc = TessLightCurve(time=time,
                             flux=flux,
                             flux_err=flux_err,
                             quality=zeros_lc)
     else:
         lc = KeplerLightCurve(time=time,
                               flux=flux,
                               flux_err=flux_err,
                               quality=zeros_lc)
     lc.extra_columns = []
     fig, axs = plt.subplots(1, 1, figsize=(8, 4), constrained_layout=True)
     axs, bin_centers, bin_means, bin_errs = Watson.compute_phased_values_and_fill_plot(
         object_id,
         axs,
         lc,
         period,
         t0 + period / 2,
         depth,
         duration,
         rp_rstar,
         a_rstar,
         bins=bins)
     plt.savefig(save_dir + "/folded_curve.png")
     plt.clf()
     bin_centers = (bin_centers - 0.5) * period
     logging.info("Sigma mode is %s", sigma_mode)
     sigma = np.nanmean(
         bin_errs) if sigma_mode == 'binning' else np.nanmean(flux_err)
     logging.info("Computed folded curve sigma = %s", sigma)
     logging.info("Preparing validation processes inputs")
     input_n_times = [
         ValidatorInput(save_dir, copy.deepcopy(target), bin_centers,
                        bin_means, sigma, period, depth, valid_apertures,
                        value, contrast_curve_file)
         for value in range(0, scenarios)
     ]
     logging.info("Start validation processes")
     #TODO fix usage of cpus returning same value for all executions
     with Pool(processes=1) as pool:
         validation_results = pool.map(TriceratopsThreadValidator.validate,
                                       input_n_times)
     logging.info("Finished validation processes")
     fpp_sum = 0
     fpp2_sum = 0
     fpp3_sum = 0
     nfpp_sum = 0
     probs_total_df = None
     scenarios_num = len(validation_results[0][4])
      star_num = np.zeros((scenarios, scenarios_num))
      u1 = np.zeros((scenarios, scenarios_num))
      u2 = np.zeros((scenarios, scenarios_num))
      fluxratio_EB = np.zeros((scenarios, scenarios_num))
      fluxratio_comp = np.zeros((scenarios, scenarios_num))
     target = input_n_times[0].target
     target.star_num = np.zeros(scenarios_num)
     target.u1 = np.zeros(scenarios_num)
     target.u2 = np.zeros(scenarios_num)
     target.fluxratio_EB = np.zeros(scenarios_num)
     target.fluxratio_comp = np.zeros(scenarios_num)
     logging.info("Computing final probabilities from the %s scenarios",
                  scenarios)
     i = 0
     with open(save_dir + "/validation.csv", 'w') as the_file:
         the_file.write("scenario,FPP,NFPP,FPP2,FPP3+\n")
         for fpp, nfpp, fpp2, fpp3, probs_df, star_num_arr, u1_arr, u2_arr, fluxratio_EB_arr, fluxratio_comp_arr \
                 in validation_results:
             if probs_total_df is None:
                 probs_total_df = probs_df
             else:
                 probs_total_df = pd.concat((probs_total_df, probs_df))
             fpp_sum = fpp_sum + fpp
             fpp2_sum = fpp2_sum + fpp2
             fpp3_sum = fpp3_sum + fpp3
             nfpp_sum = nfpp_sum + nfpp
             star_num[i] = star_num_arr
             u1[i] = u1_arr
             u2[i] = u2_arr
             fluxratio_EB[i] = fluxratio_EB_arr
             fluxratio_comp[i] = fluxratio_comp_arr
             the_file.write(
                 str(i) + "," + str(fpp) + "," + str(nfpp) + "," +
                 str(fpp2) + "," + str(fpp3) + "\n")
             i = i + 1
         for i in range(0, scenarios_num):
             target.star_num[i] = np.mean(star_num[:, i])
             target.u1[i] = np.mean(u1[:, i])
             target.u2[i] = np.mean(u2[:, i])
             target.fluxratio_EB[i] = np.mean(fluxratio_EB[:, i])
             target.fluxratio_comp[i] = np.mean(fluxratio_comp[:, i])
         fpp_sum = fpp_sum / scenarios
         nfpp_sum = nfpp_sum / scenarios
         fpp2_sum = fpp2_sum / scenarios
         fpp3_sum = fpp3_sum / scenarios
         logging.info("---------------------------------")
         logging.info("Final probabilities computed")
         logging.info("---------------------------------")
         logging.info("FPP=%s", fpp_sum)
         logging.info("NFPP=%s", nfpp_sum)
         logging.info("FPP2(Lissauer et al, 2012)=%s", fpp2_sum)
         logging.info("FPP3+(Lissauer et al, 2012)=%s", fpp3_sum)
         the_file.write("MEAN" + "," + str(fpp_sum) + "," + str(nfpp_sum) +
                        "," + str(fpp2_sum) + "," + str(fpp3_sum))
     probs_total_df = probs_total_df.groupby("scenario",
                                             as_index=False).mean()
     probs_total_df["scenario"] = pd.Categorical(
         probs_total_df["scenario"], [
             "TP", "EB", "EBx2P", "PTP", "PEB", "PEBx2P", "STP", "SEB",
             "SEBx2P", "DTP", "DEB", "DEBx2P", "BTP", "BEB", "BEBx2P",
             "NTP", "NEB", "NEBx2P"
         ])
     probs_total_df = probs_total_df.sort_values("scenario")
     probs_total_df.to_csv(save_dir + "/validation_scenarios.csv",
                           index=False)
     target.probs = probs_total_df
     # target.plot_fits(save=True, fname=save_dir + "/scenario_fits", time=lc.time.value, flux_0=lc.flux.value,
     #                  flux_err_0=sigma)
     return save_dir
Example #10
                    'toi_id']  #TIC ID for the object - used for plot title
                #				tdepth = df2.loc['Transit Depth']
                #				comments = df2.loc['Comment']			#Any existing comments on the object

                print("Epoch of first transit is {} [BJD - 2457000]".format(
                    epoch))
                print("Orbital period is {} days".format(period))
                print("Transit duration is {} hours ({} days)".format(
                    T_dur, T_dur / 24.))
                #				print("Existing comments on this object are: {}".format(comments))

                time, flux, fluxerr, time_whole, raw_flux = tess_LC_dataload_spoc(
                    filenames[i])

                if lightkurve:  # boolean flag, not the lightkurve package
                    lc = TessLightCurve(time, flux)
                    flat_lc = lc.flatten(window_length=windowlength)

                    time = flat_lc.time
                    flux = flat_lc.flux

                phase, phase_days = phase_fold(time, epoch, period)

                flux_normalised, err_normalised = normalise_LC(
                    flux, fluxerr, phase, period, T_dur)

                if mod:
                    flux_best, p_bin, f_bin, e_bin, best_fit_params, fit_val = best_fit_LC_solve(
                        phase,
                        flux_normalised,
                        period,
Example #11
    def load_lightcurve(self, task, ver='RAW'):
        """
		Load lightcurve from task ID or full task dictionary.

		Parameters:
			task (integer or dict): Task ID or the full task dictionary from the TODO-list.

		Returns:
			``lightkurve.TessLightCurve``: Lightcurve for the star in question.

		Raises:
			ValueError: On invalid file format.

		.. codeauthor:: Rasmus Handberg <*****@*****.**>
		"""

        logger = logging.getLogger(__name__)

        # Find the relevant information in the TODO-list:
        if not isinstance(task, dict) or task.get("lightcurve") is None:
            priority = int(task['priority']) if isinstance(task, dict) else int(task)
            self.cursor.execute(
                "SELECT * FROM todolist INNER JOIN diagnostics ON todolist.priority=diagnostics.priority WHERE todolist.priority=? LIMIT 1;",
                (priority, ))
            task = self.cursor.fetchone()
            if task is None:
                raise ValueError(
                    "Priority could not be found in the TODO list")
            task = dict(task)

        # Get the path of the FITS file:
        fname = os.path.join(self.input_folder, task.get('lightcurve'))
        logger.debug('Loading lightcurve: %s', fname)

        if fname.endswith('.fits') or fname.endswith('.fits.gz'):
            with fits.open(fname, mode='readonly', memmap=True) as hdu:
                # Quality flags from the pixels:
                pixel_quality = np.asarray(
                    hdu['LIGHTCURVE'].data['PIXEL_QUALITY'], dtype='int32')

                # Create the QUALITY column and fill it with flags of bad data points:
                quality = np.zeros_like(hdu['LIGHTCURVE'].data['TIME'],
                                        dtype='int32')

                # Select the raw or the corrected flux columns:
                if ver == 'RAW':
                    LC = hdu['LIGHTCURVE'].data['FLUX_RAW']
                    LC_ERR = hdu['LIGHTCURVE'].data['FLUX_RAW_ERR']
                elif ver == 'CORR':
                    LC = hdu['LIGHTCURVE'].data['FLUX_CORR']
                    LC_ERR = hdu['LIGHTCURVE'].data['FLUX_CORR_ERR']
                else:
                    raise ValueError("Invalid ver: %r" % (ver,))

                bad_data = ~np.isfinite(LC)

                bad_data |= (pixel_quality & TESSQualityFlags.DEFAULT_BITMASK
                             != 0)
                quality[bad_data] |= CorrectorQualityFlags.FlaggedBadData

                # Create lightkurve object:
                lc = TessLightCurve(
                    time=hdu['LIGHTCURVE'].data['TIME'],
                    flux=LC,
                    flux_err=LC_ERR,
                    centroid_col=hdu['LIGHTCURVE'].data['MOM_CENTR1'],
                    centroid_row=hdu['LIGHTCURVE'].data['MOM_CENTR2'],
                    quality=quality,
                    cadenceno=np.asarray(hdu['LIGHTCURVE'].data['CADENCENO'],
                                         dtype='int32'),
                    time_format='btjd',
                    time_scale='tdb',
                    targetid=hdu[0].header.get('TICID'),
                    label=hdu[0].header.get('OBJECT'),
                    camera=hdu[0].header.get('CAMERA'),
                    ccd=hdu[0].header.get('CCD'),
                    sector=hdu[0].header.get('SECTOR'),
                    ra=hdu[0].header.get('RA_OBJ'),
                    dec=hdu[0].header.get('DEC_OBJ'),
                    quality_bitmask=CorrectorQualityFlags.DEFAULT_BITMASK,
                    meta={})

                # Apply manual exclude flag:
                manexcl = manual_exclude(lc)
                lc.quality[manexcl] |= CorrectorQualityFlags.ManualExclude

        else:
            raise ValueError("Invalid file format")

        # Add additional attributes to lightcurve object:
        lc.pixel_quality = pixel_quality

        # Keep the original task in the metadata:
        lc.meta['task'] = task
        lc.meta['additional_headers'] = fits.Header()

        if logger.isEnabledFor(logging.DEBUG):
            lc.show_properties()

        return lc
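Hypothetical usage of the loader above, assuming corr is an instance of the surrounding corrector class wired to a valid TODO-list database:

# Load the raw and the corrected flux versions of the same task (hypothetical)
lc_raw = corr.load_lightcurve(17, ver='RAW')
lc_corr = corr.load_lightcurve(17, ver='CORR')
print(lc_raw.flux[:5], lc_corr.flux[:5])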