Example #1
    def add_ldtk_prior(self, teff: tuple, logg: tuple, z: tuple, passbands: tuple,
                       uncertainty_multiplier: float = 3, **kwargs) -> None:
        """Add a LDTk-based prior on the limb darkening.

        Parameters
        ----------
        teff
        logg
        z
        passbands
        uncertainty_multiplier

        Returns
        -------

        """
        if 'pbs' in kwargs.keys():
            raise DeprecationWarning("The 'pbs' argument has been renamed to 'passbands'")

        if isinstance(passbands[0], str):
            raise DeprecationWarning(
                'Passing passbands by name has been deprecated, they should be now Filter instances.')

        self.ldsc = LDPSetCreator(teff, logg, z, list(passbands))
        self.ldps = self.ldsc.create_profiles(1000)
        self.ldps.resample_linear_z()
        self.ldps.set_uncertainty_multiplier(uncertainty_multiplier)
        def ldprior(pv):
            return self.ldps.lnlike_tq(pv[:, self._sl_ld].reshape([pv.shape[0], -1, 2]))
        self._additional_log_priors.append(ldprior)
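
A minimal usage sketch for the method above; the stellar values are hypothetical (value, uncertainty) pairs, lpf stands for an already-initialised LPF instance, and the passbands must be LDTk Filter instances:

    from ldtk import BoxcarFilter

    pbs = (BoxcarFilter('r', 550, 700),)   # a single 550-700 nm box filter
    lpf.add_ldtk_prior(teff=(5750, 100), logg=(4.5, 0.05), z=(0.0, 0.1),
                       passbands=pbs)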
Example #2
    def __init__(self,
                 pbs: Tuple,
                 teff: Tuple[float, float],
                 logg: Tuple[float, float],
                 z: Tuple[float, float],
                 samples: int = 500,
                 frozen: bool = False,
                 cache: Optional[Union[str, Path]] = None,
                 dataset: str = 'vis-lowres'):
        super().__init__()
        self._sc = LDPSetCreator(teff,
                                 logg,
                                 z,
                                 pbs,
                                 cache=cache,
                                 dataset=dataset)
        self._ps = self._sc.create_profiles(samples)
        self._i = 0

        self.npb = len(pbs)
        self.nsamples = samples
        self.frozen = frozen
        self.z = self._ps._z
        self.mu = self._ps._mu
        self.profiles = self._ps._ldps
        self.mean_profiles = self._ps.profile_averages
Example #3
    def __init__(self, spectrum, T_eff, logg, z):
        '''
        Object to generate realistic limb darkening parameters

        Parameters
        ----------
        spectrum : TransitCurveGen.Spectrum
            Spectrum we want the LD coeffs for
        T_eff : tuple
            The effective temperature of the host star, given as
            (value, uncertainty) pair
        logg : tuple
            The log_10 of the surface gravity of the host star, with gravity
            measured in cm/s^2. Should be given as a (value, uncertainty) pair.
        z : tuple
            The metallicity of the host, given as a (value, uncertainty) pair.
        '''

        # Make the filters
        log_wl = np.log10(spectrum.wavelengths)
        delta = log_wl[1] - log_wl[0]

        edges = [log_wl[0] - delta] + [wl + delta for wl in log_wl]
        edges = np.array(edges)

        self.filters = [
            BoxcarFilter('{}'.format(i), edges[i], edges[i + 1])
            for i in range(len(edges) - 1)
        ]

        print(len(self.filters), len(spectrum.wavelengths))

        sc = LDPSetCreator(teff=T_eff, logg=logg, z=z, filters=self.filters)

        self.ld_profiles = sc.create_profiles()
Example #4
    def setupInitialConditions(self):

        self.initial = {}
        self.initial['planet'] = Planet(J=0.0,
                                        k=0.107509268533,
                                        rs_over_a=0.136854018274,
                                        b=0.143228040337,
                                        q=0.0,
                                        period=3.95023867775,
                                        t0=2456416.39659,
                                        dt=-0.000109927092499,
                                        esinw=0.0,
                                        ecosw=0.0)
        self.initial['star'] = Star(u1=0.47,
                                    u2=0.33,
                                    temperature=6170.0,
                                    logg=4.27,
                                    metallicity=0.26)
        self.initial['instrument'] = Instrument(self.bins[0].tlc, order=2)

        self.speak('using LDTk to estimate limb-darkening coefficients')

        self.ldtk_filename = os.path.join(self.binningdirectory,
                                          'ldtk_coefs.npy')
        try:
            self.ldtk_coefs, self.ldtk_coefuncertainties = np.load(
                self.ldtk_filename)
            self.speak('loaded LD coefficients from {}'.format(
                self.ldtk_filename))
        except IOError:
            # create some filters for the limb-darkening
            self.ldtk_filters = [
                BoxcarFilter(b.identifier, b.left / 10, b.right / 10)
                for b in self.bins
            ]

            # set up the profile creator
            self.ldtk_sc = LDPSetCreator(
                teff=(6170, 80),  # Define your star, and the code
                logg=(4.27, 0.07),  # downloads the uncached stellar
                z=(0.26, 0.15),  # spectra from the Husser et al.
                filters=self.ldtk_filters)  # FTP server automatically.

            self.ldtk_profiles = self.ldtk_sc.create_profiles()

            self.ldtk_coefs, self.ldtk_coefuncertainties = self.ldtk_profiles.coeffs_qd(
                do_mc=False)
            np.save(self.ldtk_filename,
                    (self.ldtk_coefs, self.ldtk_coefuncertainties))
            self.speak('saved new LD coefficients to {}'.format(
                self.ldtk_filename))
Example #5
def ldtk_ldc(lambda_min, lambda_max, Teff, Teff_unc, logg, logg_unc, z, z_unc):
    """
    Function to estimate quadratic limb darkening coefficients for a given star
    
    Parameters:
    ----------
    lambda_min: Start wavelength of the bandpass filter.
    
    lambda_max: End  wavelength of the bandpass filter.

    Teff: Effective temperature of the star.

    Teff_unc: Uncertainty in Teff.

    logg: Surface gravity of the star.

    logg_unc: Uncertainty in logg.

    z: Metallicity of the star.

    z_unc: Uncertainty in z.
    

    Returns
    -------
    cq, eq : Each an array giving the 2 quadractic limb darkening parameters and the errors associated with them 

    
    """

    from ldtk import LDPSetCreator, BoxcarFilter

    # Define your passbands. Boxcar filters useful in transmission spectroscopy
    filters = [BoxcarFilter('a', lambda_min, lambda_max)]

    sc = LDPSetCreator(
        teff=(Teff, Teff_unc),  # Define your star, and the code
        logg=(logg, logg_unc),  # downloads the uncached stellar
        z=(z, z_unc),  # spectra from the Husser et al.
        filters=filters)  # FTP server automatically.

    ps = sc.create_profiles()  # Create the limb darkening profiles
    cq, eq = ps.coeffs_qd(do_mc=True)  # Estimate quadratic law coefficients

    #lnlike = ps.lnlike_qd([[0.45,0.15],      # Calculate the quadratic law log
    #                       [0.35,0.10],      # likelihood for a set of coefficients
    #                       [0.25,0.05]])     # (returns the joint likelihood)

    #lnlike = ps.lnlike_qd([0.25,0.05],flt=0) # Quad. law log L for the first filter
    return cq, eq
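
A quick sketch of a call with hypothetical stellar parameters (wavelengths are in nm, matching BoxcarFilter):

    cq, eq = ldtk_ldc(lambda_min=550, lambda_max=700, Teff=5750, Teff_unc=100,
                      logg=4.44, logg_unc=0.05, z=0.0, z_unc=0.1)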
Example #6
    def add_ldtk_prior(self, teff: tuple, logg: tuple, z: tuple,
                       uncertainty_multiplier: float = 3,
                       pbs: tuple = ('g', 'r', 'i', 'z')) -> None:
        """Add a LDTk-based prior on the limb darkening.

        Parameters
        ----------
        teff
        logg
        z
        uncertainty_multiplier
        pbs

        Returns
        -------

        """
        fs = {n: f for n, f in zip('g r i z'.split(), (sdss_g, sdss_r, sdss_i, sdss_z))}
        filters = [fs[k] for k in pbs]
        self.ldsc = LDPSetCreator(teff, logg, z, filters)
        self.ldps = self.ldsc.create_profiles(1000)
        self.ldps.resample_linear_z()
        self.ldps.set_uncertainty_multiplier(uncertainty_multiplier)
        def ldprior(pv):
            return self.ldps.lnlike_tq(pv[self._sl_ld])
        self.lnpriors.append(ldprior)
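
This variant selects the passbands by SDSS filter name; a hypothetical call:

    lpf.add_ldtk_prior(teff=(5750, 100), logg=(4.5, 0.05), z=(0.0, 0.1),
                       uncertainty_multiplier=5, pbs=('g', 'i'))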
Example #7
    def add_ldtk_prior(
        self,
        teff: tuple,
        logg: tuple,
        z: tuple,
        uncertainty_multiplier: float = 3,
        pbs: tuple = ('g', 'r', 'i', 'z')
    ) -> None:
        """Add a LDTk-based prior on the limb darkening.

        Parameters
        ----------
        teff
        logg
        z
        uncertainty_multiplier
        pbs

        Returns
        -------

        """
        fs = {
            n: f
            for n, f in zip('g r i z'.split(), (sdss_g, sdss_r, sdss_i,
                                                sdss_z))
        }
        filters = [fs[k] for k in pbs]
        self.ldsc = LDPSetCreator(teff, logg, z, filters)
        self.ldps = self.ldsc.create_profiles(1000)
        self.ldps.resample_linear_z()
        self.ldps.set_uncertainty_multiplier(uncertainty_multiplier)

        def ldprior(pv):
            pv = atleast_2d(pv)
            lnl = zeros(pv.shape[0])
            for i in range(pv.shape[0]):
                lnl[i] = self.ldps.lnlike_tq(pv[i, self._sl_ld])
            return lnl

        self.lnpriors.append(ldprior)
Example #8
class LDTkLDModel(LDModel):
    def __init__(self,
                 pbs: Tuple,
                 teff: Tuple[float, float],
                 logg: Tuple[float, float],
                 z: Tuple[float, float],
                 samples: int = 500,
                 frozen: bool = False,
                 cache: Optional[Union[str, Path]] = None,
                 dataset: str = 'vis-lowres'):
        super().__init__()
        self._sc = LDPSetCreator(teff,
                                 logg,
                                 z,
                                 pbs,
                                 cache=cache,
                                 dataset=dataset)
        self._ps = self._sc.create_profiles(samples)
        self._i = 0

        self.npb = len(pbs)
        self.nsamples = samples
        self.frozen = frozen
        self.z = self._ps._z
        self.mu = self._ps._mu
        self.profiles = self._ps._ldps
        self.mean_profiles = self._ps.profile_averages

    def thaw(self):
        self.frozen = False

    def freeze(self):
        self.frozen = True

    def __call__(self,
                 mu: ndarray,
                 x: Optional[ndarray] = None) -> Tuple[ndarray, ndarray]:
        npv = 1 if x is None else x.shape[0]
        self._i = i = randint(0, self.nsamples)
        if self.frozen:
            return eval_ldm_frozen(mu, self.mu, self.z, self.mean_profiles,
                                   npv)
        else:
            return eval_ldm(mu, self.mu, self.z, self.profiles, npv,
                            self.nsamples)

    def _evaluate(self, mu: ndarray, x: ndarray) -> ndarray:
        raise NotImplementedError

    def _integrate(self, x: ndarray) -> float:
        raise NotImplementedError
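
A minimal construction sketch, assuming the tabulated SDSS filters can be imported from ldtk.filters as in the other examples; the call returns a two-array tuple per the type annotation of __call__:

    from ldtk.filters import sdss_g, sdss_i
    import numpy as np

    ldm = LDTkLDModel(pbs=(sdss_g, sdss_i), teff=(5750, 100),
                      logg=(4.5, 0.05), z=(0.0, 0.1), samples=200)
    mu = np.linspace(0.05, 1.0, 50)
    profiles = ldm(mu)   # a two-array tuple per the annotation
    ldm.freeze()         # evaluate using the mean profiles from now on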
Example #9
class BaseLPF(LogPosteriorFunction):
    _lpf_name = 'BaseLPF'

    def __init__(self, name: str, passbands: list, times: list = None, fluxes: Iterable = None, errors: list = None,
                 pbids: list = None, covariates: list = None, wnids: list = None, tm: TransitModel = None,
                 nsamples: tuple = 1, exptimes: tuple = 0., init_data=True, result_dir: Path = None, tref: float = 0.0,
                 lnlikelihood: str = 'wn'):
        """The base Log Posterior Function class.

        The `BaseLPF` class creates the basis for transit light curve analyses using `PyTransit`. This class can be
        used in a basic analysis directly, or it can be inherited to create a basis for a more complex analysis.

        Parameters
        ----------
        name: str
            Name of the log posterior function instance.

        passbands: Iterable
            List of unique passband names (filters) that the light curves have been observed in.

        times: Iterable
            List of 1d ndarrays each containing the mid-observation times for a single light curve.

        fluxes: Iterable
            List of 1d ndarrays each containing the normalized fluxes for a single light curve.

        errors: Iterable
            List of 1d ndarrays each containing the flux measurement uncertainties for a single light curve.

        pbids: Iterable of ints
            List of passband indices mapping each light curve to a single passband.

        covariates: Iterable
            List of covariates, one 2d ndarray per light curve.

        wnids: Iterable of ints
            List of noise set indices mapping each light curve to a single noise set.

        tm: TransitModel
            Transit model to use instead of the default model.

        nsamples: list[int]
            List of supersampling factors.  The values should be integers and given one per light curve.

        exptimes: list[float]
            List of exposure times. The values should be floats with the time given in days.

        init_data: bool
            Set to `False` to allow the LPF to be initialized without data. This is mainly for debugging.

        result_dir: Path, optional
            Default saving directory

        tref: float, optional
            Reference time
        """

        self._pre_initialisation()

        super().__init__(name=name, result_dir=result_dir)

        self.tm = tm or QuadraticModel(klims=(0.01, 0.75), nk=512, nz=512)
        self._tref = tref

        # Passbands
        # ---------
        # Passbands should be arranged from blue to red
        if isinstance(passbands, (list, tuple, ndarray)):
            self.passbands = passbands
        else:
            self.passbands = [passbands]
        self.npb = npb = len(self.passbands)

        self.nsamples = None
        self.exptimes = None
        self.lnlikelihood_type = lnlikelihood.lower()

        # Declare high-level objects
        # --------------------------
        self._lnlikelihood_models = []
        self._baseline_models = []
        self.ps = None          # Parametrisation
        self.de = None          # Differential evolution optimiser
        self.sampler = None     # MCMC sampler
        self.instrument = None  # Instrument
        self.ldsc = None        # Limb darkening set creator
        self.ldps = None        # Limb darkening profile set
        self.cntm = None        # Contamination model

        # Declare data arrays and variables
        # ---------------------------------
        self.nlc: int = 0                # Number of light curves
        self.n_noise_blocks: int = 0     # Number of noise blocks
        self.noise_ids = None
        self.times: list = None          # List of time arrays
        self.fluxes: list = None         # List of flux arrays
        self.errors: list = None         # List of flux uncertainties
        self.covariates: list = None     # List of covariates
        self.wn: ndarray = None          # Array of white noise estimates for each light curve
        self.timea: ndarray = None       # Array of concatenated times
        self.mfluxa: ndarray = None      # Array of concatenated model fluxes
        self.ofluxa: ndarray = None      # Array of concatenated observed fluxes
        self.errora: ndarray = None      # Array of concatenated flux uncertainties

        self.lcids: ndarray = None       # Array of light curve indices for each datapoint
        self.pbids: ndarray = None       # Array of passband indices for each light curve
        self.lcslices: list = None       # List of light curve slices

        if init_data:
            # Set up the observation data
            # ---------------------------
            self._init_data(times = times, fluxes = fluxes, pbids = pbids, covariates = covariates,
                            errors = errors, wnids = wnids, nsamples = nsamples, exptimes = exptimes)

            # Set up the parametrisation
            # --------------------------
            self._init_parameters()

            # Initialise the instrument
            # --------------------------
            self._init_instrument()

        self._init_lnlikelihood()
        self._init_baseline()
        self._post_initialisation()


    def _init_data(self, times: Union[List, ndarray], fluxes: Union[List, ndarray], pbids: Union[List, ndarray] = None,
                   covariates: Union[List, ndarray] = None, errors: Union[List, ndarray] = None, wnids: Union[List, ndarray] = None,
                   nsamples: Union[int, ndarray, Iterable] = 1, exptimes: Union[float, ndarray, Iterable] = 0.):

        if isinstance(times, ndarray) and times.ndim == 1 and times.dtype == float:
            times = [times]

        if isinstance(fluxes, ndarray) and fluxes.ndim == 1 and fluxes.dtype == float:
            fluxes = [fluxes]

        if pbids is None:
            if self.pbids is None:
                self.pbids = zeros(len(fluxes), int)
        else:
            self.pbids = atleast_1d(pbids).astype('int')

        self.nlc = len(times)
        self.times = times
        self.fluxes = fluxes
        self.wn = [nanstd(diff(f)) / sqrt(2) for f in fluxes]
        self.timea = concatenate(self.times)
        self.ofluxa = concatenate(self.fluxes)
        self.mfluxa = zeros_like(self.ofluxa)
        self.lcids = concatenate([full(t.size, i) for i, t in enumerate(self.times)])

        # TODO: Noise IDs get scrambled when removing transits, fix!!!
        if wnids is None:
            if self.noise_ids is None:
                self.noise_ids = zeros(self.nlc, int)
                self.n_noise_blocks = 1
        else:
            self.noise_ids = asarray(wnids)
            self.n_noise_blocks = len(unique(self.noise_ids))
            assert self.noise_ids.size == self.nlc, "Need one noise block id per light curve."
            assert self.noise_ids.max() == self.n_noise_blocks - 1, "Error initialising noise block ids."

        if isscalar(nsamples):
            self.nsamples = full(self.nlc, nsamples)
            self.exptimes = full(self.nlc, exptimes)
        else:
            assert (len(nsamples) == self.nlc) and (len(exptimes) == self.nlc)
            self.nsamples = asarray(nsamples, 'int')
            self.exptimes = asarray(exptimes)

        self.tm.set_data(self.timea-self._tref, self.lcids, self.pbids, self.nsamples, self.exptimes)

        if errors is None:
            self.errors = [full(t.size, nan) for t in self.times]
        else:
            self.errors = errors
        self.errora = concatenate(self.errors)

        # Initialise the light curves slices
        # ----------------------------------
        self.lcslices = []
        sstart = 0
        for i in range(self.nlc):
            s = self.times[i].size
            self.lcslices.append(s_[sstart:sstart + s])
            sstart += s

        # Initialise the covariate arrays, if given
        # -----------------------------------------
        if covariates is not None:
            self.covariates = covariates
            for cv in self.covariates:
                # Normalise in place; plain reassignment of the loop variable
                # would leave the stored arrays untouched.
                cv[:] = (cv - cv.mean(0)) / cv.std(0)
            #self.ncovs = self.covariates[0].shape[1]
            #self.covsize = array([c.size for c in self.covariates])
            #self.covstart = concatenate([[0], self.covsize.cumsum()[:-1]])
            #self.cova = concatenate(self.covariates)

    def _add_lnlikelihood_model(self, lnl):
        self._lnlikelihood_models.append(lnl)

    def _add_baseline_model(self, blm):
        self._baseline_models.append(blm)

    def _init_parameters(self):
        self.ps = ParameterSet()
        self._init_p_orbit()
        self._init_p_planet()
        self._init_p_limb_darkening()
        self._init_p_baseline()
        self.ps.freeze()

    def _init_p_orbit(self):
        """Orbit parameter initialisation.
        """
        porbit = [
            GParameter('tc',  'zero_epoch',       'd',      N(0.0,  0.1), (-inf, inf)),
            GParameter('p',   'period',           'd',      N(1.0, 1e-5), (0,    inf)),
            GParameter('rho', 'stellar_density',  'g/cm^3', U(0.1, 25.0), (0,    inf)),
            GParameter('b',   'impact_parameter', 'R_s',    U(0.0,  1.0), (0,      1))]
        self.ps.add_global_block('orbit', porbit)

    def _init_p_planet(self):
        """Planet parameter initialisation.
        """
        pk2 = [PParameter('k2', 'area_ratio', 'A_s', U(0.05**2, 0.2**2), (0, inf))]
        self.ps.add_passband_block('k2', 1, 1, pk2)
        self._pid_k2 = repeat(self.ps.blocks[-1].start, self.npb)
        self._start_k2 = self.ps.blocks[-1].start
        self._sl_k2 = self.ps.blocks[-1].slice

    def _init_p_limb_darkening(self):
        """Limb darkening parameter initialisation.
        """
        pld = concatenate([
            [PParameter(f'q1_{pb}', f'q1 coefficient {pb}', '', U(0, 1), bounds=(0, 1)),
             PParameter(f'q2_{pb}', f'q2 coefficient {pb}', '', U(0, 1), bounds=(0, 1))]
            for pb in self.passbands])
        self.ps.add_passband_block('ldc', 2, self.npb, pld)
        self._sl_ld = self.ps.blocks[-1].slice
        self._start_ld = self.ps.blocks[-1].start

    def _init_p_baseline(self):
        pass

    def _init_p_noise(self):
        pass

    def _init_instrument(self):
        pass

    def _pre_initialisation(self):
        pass

    def _post_initialisation(self):
        pass

    def _init_lnlikelihood(self):
        if self.lnlikelihood_type == 'wn':
            self._add_lnlikelihood_model(WNLogLikelihood(self))
        elif self.lnlikelihood_type == 'celerite':
            self._add_lnlikelihood_model(CeleriteLogLikelihood(self))
        else:
            raise NotImplementedError

    def _init_baseline(self):
        pass

    def create_pv_population(self, npop=50):
        pvp = self.ps.sample_from_prior(npop)

        # With LDTk
        # ---------
        #
        # Use LDTk to create the sample if LDTk has been initialised.
        #
        if self.ldps:
            istart = self._start_ld
            cms, ces = self.ldps.coeffs_tq()
            for i, (cm, ce) in enumerate(zip(cms.flat, ces.flat)):
                pvp[:, i + istart] = normal(cm, ce, size=pvp.shape[0])

        # No LDTk
        # -------
        #
        # Ensure that the total limb darkening decreases towards
        # red passbands.
        #
        else:
            ldsl = self._sl_ld
            for i in range(pvp.shape[0]):
                pid = argsort(pvp[i, ldsl][::2])[::-1]
                pvp[i, ldsl][::2] = pvp[i, ldsl][::2][pid]
                pvp[i, ldsl][1::2] = pvp[i, ldsl][1::2][pid]

        return pvp

    def add_prior(self, prior):
        self._additional_log_priors.append(prior)

    def baseline(self, pv):
        if self._baseline_models:
            if pv.ndim == 1:
                bl = ones_like(self.timea)
            else:
                bl = ones((pv.shape[0], self.timea.size))
            for blm in self._baseline_models:
                bl = blm(pv, bl)
            return bl
        else:
            return 1.

    def trends(self, pv):
        """Additive trends"""
        return 0.

    def transit_model(self, pv, copy=True):
        pv = atleast_2d(pv)
        ldc = map_ldc(pv[:,self._sl_ld])
        zero_epoch = pv[:,0] - self._tref
        period = pv[:,1]
        smaxis = as_from_rhop(pv[:,2], period)
        inclination  = i_from_ba(pv[:,3], smaxis)
        radius_ratio = sqrt(pv[:,4:5])
        return self.tm.evaluate(radius_ratio, ldc, zero_epoch, period, smaxis, inclination)

    def flux_model(self, pv):
        baseline    = self.baseline(pv)
        trends      = self.trends(pv)
        model_flux = self.transit_model(pv)
        return baseline * model_flux + trends

    def residuals(self, pv):
        return self.ofluxa - self.flux_model(pv)

    def lnlikelihood(self, pvp):
        """Log likelihood for a 1D or 2D array of model parameters.

        Parameters
        ----------
        pvp: ndarray
            Either a 1D parameter vector or a 2D parameter array.

        Returns
        -------
            Log likelihood for the given parameter vector(s).
        """
        fmodel = self.flux_model(pvp)

        if pvp.ndim == 1:
            lnl = 0.
        else:
            lnl = zeros(pvp.shape[0])

        for lnlikelihood in self._lnlikelihood_models:
            lnl += lnlikelihood(pvp, fmodel)
        return lnl

    def set_radius_ratio_prior(self, kmin, kmax):
        """Set a uniform prior on all radius ratios."""
        for p in self.ps[self._sl_k2]:
            p.prior = U(kmin ** 2, kmax ** 2)

    def add_t14_prior(self, mean: float, std: float) -> None:
        """Add a normal prior on the transit duration.

        Parameters
        ----------
        mean: float
            Mean of the normal distribution
        std: float
            Standard deviation of the normal distribution.
        """

        def T14(pv):
            pv = atleast_2d(pv)
            a = as_from_rhop(pv[:, 2], pv[:, 1])
            t14 = duration_eccentric(pv[:, 1], sqrt(pv[:, 4]), a, arccos(pv[:, 3] / a), 0, 0, 1)
            return norm.logpdf(t14, mean, std)

        self._additional_log_priors.append(T14)

    def add_as_prior(self, mean: float, std: float) -> None:
        """Add a normal prior on the scaled semi-major axis :math:`(a / R_\star)`.

        Parameters
        ----------
        mean: float
            Mean of the normal distribution.
        std: float
            Standard deviation of the normal distribution
        """
        def as_prior(pv):
            a = as_from_rhop(pv[2], pv[1])
            return norm.logpdf(a, mean, std)
        self._additional_log_priors.append(as_prior)

    def add_ldtk_prior(self, teff: tuple, logg: tuple, z: tuple, passbands: tuple,
                       uncertainty_multiplier: float = 3, **kwargs) -> None:
        """Add a LDTk-based prior on the limb darkening.

        Parameters
        ----------
        teff
        logg
        z
        passbands
        uncertainty_multiplier

        Returns
        -------

        """
        if 'pbs' in kwargs.keys():
            raise DeprecationWarning("The 'pbs' argument has been renamed to 'passbands'")

        if isinstance(passbands[0], str):
            raise DeprecationWarning(
                'Passing passbands by name has been deprecated, they should be now Filter instances.')

        self.ldsc = LDPSetCreator(teff, logg, z, list(passbands))
        self.ldps = self.ldsc.create_profiles(1000)
        self.ldps.resample_linear_z()
        self.ldps.set_uncertainty_multiplier(uncertainty_multiplier)
        def ldprior(pv):
            return self.ldps.lnlike_tq(pv[:, self._sl_ld].reshape([pv.shape[0], -1, 2]))
        self._additional_log_priors.append(ldprior)

    def remove_outliers(self, sigma=5):
        fmodel = squeeze(self.flux_model(self.de.minimum_location))
        covariates = [] if self.covariates is not None else None
        times, fluxes, lcids, errors = [], [], [], []
        for i in range(len(self.times)):
            res = self.fluxes[i] - fmodel[self.lcslices[i]]
            mask = ~sigma_clip(res, sigma=sigma).mask
            times.append(self.times[i][mask])
            fluxes.append(self.fluxes[i][mask])
            if covariates is not None:
                covariates.append(self.covariates[i][mask])
            if self.errors is not None:
                errors.append(self.errors[i][mask])

        self._init_data(times=times, fluxes=fluxes, covariates=covariates, pbids=self.pbids,
                        errors=(errors if self.errors is not None else None), wnids=self.noise_ids,
                        nsamples=self.nsamples, exptimes=self.exptimes)

    def remove_transits(self, tids):
        m = ones(len(self.times), bool)
        m[tids] = False
        self._init_data(self.times[m], self.fluxes[m], self.pbids[m],
                        self.covariates[m] if self.covariates is not None else None,
                        self.errors[m], self.noise_ids[m], self.nsamples[m], self.exptimes[m])
        self._init_parameters()

    def posterior_samples(self, burn: int = 0, thin: int = 1, derived_parameters: bool = True):
        df = super().posterior_samples(burn=burn, thin=thin)
        if derived_parameters:
            for k2c in df.columns[self._sl_k2]:
                df[k2c.replace('k2', 'k')] = sqrt(df[k2c])
            df['a'] = as_from_rhop(df.rho.values, df.p.values)
            df['inc'] = i_from_baew(df.b.values, df.a.values, 0., 0.)

            average_ks = sqrt(df.iloc[:, self._sl_k2]).mean(1).values
            df['t14'] = d_from_pkaiews(df.p.values, average_ks, df.a.values, df.inc.values, 0., 0., 1, kind=14)
            df['t23'] = d_from_pkaiews(df.p.values, average_ks, df.a.values, df.inc.values, 0., 0., 1, kind=23)
        return df

    def plot_light_curves(self, method='de', ncol: int = 3, width: float = 2., max_samples: int = 1000, figsize=None,
                          data_alpha=0.5, ylim=None):
        nrow = int(ceil(self.nlc / ncol))
        if method == 'mcmc':
            df = self.posterior_samples(derived_parameters=False)
            t0, p = df.tc.median(), df.p.median()
            fmodel = self.flux_model(permutation(df.values)[:max_samples])
            fmperc = percentile(fmodel, [50, 16, 84, 2.5, 97.5], 0)
        else:
            fmodel = squeeze(self.flux_model(self.de.minimum_location))
            t0, p = self.de.minimum_location[0], self.de.minimum_location[1]
            fmperc = None

        fig, axs = subplots(nrow, ncol, figsize=figsize, constrained_layout=True, sharey='all', sharex='all',
                            squeeze=False)
        for i in range(self.nlc):
            ax = axs.flat[i]
            e = epoch(self.times[i].mean(), t0, p)
            tc = t0 + e * p
            time = self.times[i] - tc

            ax.plot(time, self.fluxes[i], '.', alpha=data_alpha)

            if method == 'de':
                ax.plot(time, fmodel[self.lcslices[i]], 'w', lw=4)
                ax.plot(time, fmodel[self.lcslices[i]], 'k', lw=1)
            else:
                ax.fill_between(time, *fmperc[3:5, self.lcslices[i]], alpha=0.15)
                ax.fill_between(time, *fmperc[1:3, self.lcslices[i]], alpha=0.25)
                ax.plot(time, fmperc[0, self.lcslices[i]])

            setp(ax, xlabel='Time - T$_c$ [d]', xlim=(-width / 2 / 24, width / 2 / 24))
        setp(axs[:, 0], ylabel='Normalised flux')

        if ylim is not None:
            setp(axs, ylim=ylim)

        for ax in axs.flat[self.nlc:]:
            ax.remove()
        return fig

    def __repr__(self):
        return f"Target: {self.name}\nLPF: {self._lpf_name}\n Passbands: {self.passbands}"
Example #10
def ldc_calc_k2(star=None,teff=None,tu=None,logg=None,lu=None,metal=None,metu=None,out_file=None,pickle_file=None,make_txt=True,make_fig=True):
    import pickle
    import numpy as np
    from scipy.interpolate import interp1d
    import ldtk
    from ldtk import LDPSetCreator, BoxcarFilter, TabulatedFilter
    from scipy.optimize import curve_fit
    # The pyplot names used below (figure, plot, ...) were implicitly
    # available in the original module; import them explicitly here.
    from matplotlib.pyplot import (figure, title, xlabel, ylabel, ylim, plot,
                                   legend, savefig, close)
    kep_trans = np.loadtxt('/Users/jlothrin/LDCs/kepler_response_lowres1.csv', delimiter=',', skiprows=9)

    waves = kep_trans[:,0] * 1000.
    
    throughs = kep_trans[:,1]
    
    filters = [ldtk.TabulatedFilter('kepler',waves,throughs)]
    #filters = [BoxcarFilter('a',650,750)]
    sc = LDPSetCreator(teff=[teff,tu],logg=[logg,lu],z=[metal,metu],filters=filters)
    
    ps = sc.create_profiles(nsamples=100)
    
    #plot(ps._mu,ps.profile_averages[0],'k',label='Model Intensity')
    #plot(ps._mu,ps.profile_averages[0],'ko',label='Sampled Mu')
    oldmu = ps._mu
    oldprof = ps.profile_averages[0]
    
    ps.set_limb_z_try(np.sqrt(1-np.min(ps._mu)**2))
    
    qc,qe = ps.coeffs_qd(do_mc=True)
    qc,qm = ps.coeffs_qd(do_mc=True,return_cm=True)
    nlc,nle = ps.coeffs_nl(do_mc=True)
    lnc,lne = ps.coeffs_ln(do_mc=True)
    
    if make_txt is True:    
        text_file = open(out_file,'w')
        text_file.write(star + '\n \n')
        text_file.write('LINEAR: 1 - coeff*(1-mu) \n')
        text_file.write('Linear Coefficient: %s \n' % lnc[0,0])
        text_file.write('Linear Coefficient Uncertainty: %s \n \n' % lne[0])
        text_file.write('QUADRATIC: 1 - coeff1*(1-mu)-coeff2*(1-mu)**2 \n')
        text_file.write('Quadratic Coefficients: %s %f \n' % (qc[0,0],qc[0,1]))
        text_file.write('Quadratic Coefficients Uncertainty: %s %f \n' % (qe[0,0],qe[0,1]))
        text_file.close()
    
    #good_mu = ps_mu[2:]
    lw = 2.0
    ms = 3
    fig0 = figure()
    title(star)
    xlabel('mu')
    ylabel('I')
    ylim([0,1.0])
    plot(oldmu,oldprof,'r',label='Model Intensity',linewidth=lw)
    plot(oldmu,oldprof,'ro',label='Sampled Mu')
    plot(ps._mu,ps.profile_averages[0],'k',label='Rescaled Model Intensity',linewidth=lw)
    plot(ps._mu,ps.profile_averages[0],'ko',label='Rescaled Sampled Mu')
    plot(ps._mu,1-lnc[0,0]*(1-ps._mu),'y',label='Linear Fit',linewidth=lw)
    plot(ps._mu,1-qc[0,0]*(1-ps._mu)-qc[0,1]*(1-ps._mu)**2,'g--',label = 'Quad Fit',linewidth=lw)
    plot(ps._mu,ps.profile_averages[0]+ps.profile_uncertainties[0],'k:',label='Intensity Uncertainties')
    plot(ps._mu,ps.profile_averages[0]-ps.profile_uncertainties[0],'k:')

    
    #plot(new_mu,oldps,'r',label='Shifted')
    #plot(new_mu,oldps,'ro',label='Shifted')
    legend(loc='lower right')
    
    if make_fig is True:
        savefig(star+'ldc.png', format='png')    
        close(fig0)
    
    fig1 = figure()
    title(star)
    xlabel('z')
    ylabel('I')
    plot(ps._z,ps.profile_averages[0],label='Model Intensity')
    plot(ps._z,ps.profile_averages[0],'bo',label='Sampled z')
    plot(ps._z,1-lnc[0,0]*(1-ps._mu),'y',label='Linear Fit')
    plot(ps._z,1-qc[0,0]*(1-ps._mu)-qc[0,1]*(1-ps._mu)**2,'g--',label = 'Quad Fit')
    #plot(new_mu,oldps,'r',label='Shifted')
    legend(loc='lower right')
    close(fig1)

    variables = {'sc':sc,'ps':ps,'qc':qc,'qe':qe,'qm':qm,'lnc':lnc,'lne':lne}
    
    if pickle_file is not None:
        pickle.dump(variables,open(pickle_file,'wb'),protocol=2)
        
    return lnc, lne, qc, qe
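
A hedged example call; note that the Kepler response file path hard-coded above is machine-specific and must exist:

    lnc, lne, qc, qe = ldc_calc_k2(star='ExampleStar', teff=5750, tu=100,
                                   logg=4.5, lu=0.1, metal=0.0, metu=0.1,
                                   out_file='ExampleStar_ldc.txt', make_fig=False)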
Example #11
    def __init__(self,
                 host_T,
                 host_logg,
                 host_z,
                 filters,
                 ld_model='quadratic',
                 n_samples=20000,
                 do_mc=False,
                 cache_path=None):

        # Sanity checks
        if not ld_model in _implemented_ld_models:
            raise ValueError('Unrecognised ld_model {}'.format(ld_model))

        self.default_model = ld_model

        # Set up the filters
        #print('Setting up filters')
        ldtk_filters = []
        for i, f in enumerate(filters):
            if isinstance(f[0], Iterable):
                # We have been passed a full filter profile, set up
                # TabulatedFilter
                # Work out if the profile is in percent or fraction - is
                # anything bigger than 1?
                if np.any(f[1] > 1):
                    tmf = 1e-2
                else:
                    tmf = 1
                ldtk_filters.append(TabulatedFilter(i, f[0], f[1], tmf))
            else:
                ldtk_filters.append(BoxcarFilter(i, f[0], f[1]))

        # Make the set creator, downloading data files if required
        if cache_path is not None:
            os.makedirs(cache_path, exist_ok=True)
        #print('Making LD parameter set creator.')
        #print('This may take some time as we may need to download files...')
        set_creator = LDPSetCreator(teff=host_T,
                                    logg=host_logg,
                                    z=host_z,
                                    filters=ldtk_filters,
                                    cache=cache_path,
                                    dataset='visir-lowres')

        # Get the LD profiles from the set creator
        #print('Obtaining LD profiles')
        self.profile_set = set_creator.create_profiles(nsamples=n_samples)

        # Find the 'best values' for each filter and then find the ratios
        # compared to the first.
        #print('Finding coefficients and ratios')
        self.coeffs = {}
        self.ratios = {}

        self._power2_available = True

        for model in _implemented_ld_models:
            try:
                self.coeffs[model] = self._extract_best_coeffs(model)
                self.ratios[
                    model] = self.coeffs[model][0] / self.coeffs[model][0][0]
            except Exception as e:
                print(e)
                print(f'Unable to initialise {model} model')
                self._power2_available = False
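
The filters argument accepts either (low, high) boxcar edges or full (wavelength, transmission) profiles. A sketch of both forms, with the enclosing class name assumed to be LimbDarkening (it is not shown in this snippet):

    import numpy as np

    box = (550, 700)                            # boxcar edges in nm
    wl = np.linspace(400, 900, 50)              # tabulated profile wavelengths
    tr = np.exp(-0.5 * ((wl - 650) / 80) ** 2)  # transmission as a fraction
    ld = LimbDarkening(host_T=(5750, 100), host_logg=(4.5, 0.05),
                       host_z=(0.0, 0.1), filters=[box, (wl, tr)])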
Example #12
class BaseLPF:
    _lpf_name = 'base'

    def __init__(self, name: str, passbands: list, times: list = None, fluxes: list = None, errors: list = None,
                 pbids: list = None, covariates: list = None, tm: TransitModel = None,
                 nsamples: tuple = 1, exptimes: tuple = 0.):
        self.tm = tm or QuadraticModel(klims=(0.01, 0.75), nk=512, nz=512)

        # LPF name
        # --------
        self.name = name

        # Passbands
        # ---------
        # Should be arranged from blue to red
        if isinstance(passbands, (list, tuple, ndarray)):
            self.passbands = passbands
        else:
            self.passbands = [passbands]
        self.npb = npb = len(self.passbands)

        self.nsamples = None
        self.exptimes = None

        # Declare high-level objects
        # --------------------------
        self.ps = None          # Parametrisation
        self.de = None          # Differential evolution optimiser
        self.sampler = None     # MCMC sampler
        self.instrument = None  # Instrument
        self.ldsc = None        # Limb darkening set creator
        self.ldps = None        # Limb darkening profile set
        self.cntm = None        # Contamination model

        # Declare data arrays and variables
        # ---------------------------------
        self.nlc: int = 0                # Number of light curves
        self.times: list = None          # List of time arrays
        self.fluxes: list = None         # List of flux arrays
        self.errors: list = None         # List of flux uncertainties
        self.covariates: list = None     # List of covariates
        self.wn: ndarray = None          # Array of white noise estimates for each light curve
        self.timea: ndarray = None       # Array of concatenated times
        self.mfluxa: ndarray = None      # Array of concatenated model fluxes
        self.ofluxa: ndarray = None      # Array of concatenated observed fluxes
        self.errora: ndarray = None      # Array of concatenated flux uncertainties

        self.lcids: ndarray = None       # Array of light curve indices for each datapoint
        self.pbids: ndarray = None       # Array of passband indices for each light curve
        self.lcslices: list = None       # List of light curve slices

        # Set up the observation data
        # ---------------------------
        if times is not None and fluxes is not None and pbids is not None:
            self._init_data(times, fluxes, pbids, covariates, errors, nsamples, exptimes)

        # Set up the parametrisation
        # --------------------------
        self._init_parameters()

        # Initialise the additional lnprior list
        # --------------------------------------
        self.lnpriors = []

        # Initialise the temporary arrays
        # -------------------------------
        self._zpv = zeros(6)
        self._tuv = zeros((npb, 2))
        self._zeros = zeros(npb)
        self._ones = ones(npb)

        # Initialise the instrument
        self._init_instrument()

        if times is not None:
            self._bad_fluxes = [ones_like(t) for t in self.times]
        else:
            self._bad_fluxes = None


    def _init_data(self, times, fluxes, pbids, covariates=None, errors=None, nsamples=1, exptimes=0.):

        if isinstance(times, ndarray) and times.dtype == float:
            times = [times]

        if isinstance(fluxes, ndarray) and fluxes.dtype == float:
            fluxes = [fluxes]

        self.nlc = len(times)
        self.times = asarray(times)
        self.fluxes = asarray(fluxes)
        self.pbids = asarray(pbids)
        self.wn = [diff(f).std() / sqrt(2) for f in fluxes]
        self.timea = concatenate(self.times)
        self.ofluxa = concatenate(self.fluxes)
        self.mfluxa = zeros_like(self.ofluxa)
        self.pbids = atleast_1d(pbids).astype('int')
        self.lcids = concatenate([full(t.size, i) for i, t in enumerate(self.times)])

        if isscalar(nsamples):
            self.nsamples = full(self.nlc, nsamples)
            self.exptimes = full(self.nlc, exptimes)
        else:
            assert (len(nsamples) == self.nlc) and (len(exptimes) == self.nlc)
            self.nsamples = asarray(nsamples, 'int')
            self.exptimes = asarray(exptimes)

        self.tm.set_data(self.timea, self.lcids, self.pbids, self.nsamples, self.exptimes)

        if errors is None:
            self.errors = array([full(t.size, nan) for t in self.times])
        else:
            self.errors = asarray(errors)
        self.errora = concatenate(self.errors)

        # Initialise the light curves slices
        # ----------------------------------
        self.lcslices = []
        sstart = 0
        for i in range(self.nlc):
            s = self.times[i].size
            self.lcslices.append(s_[sstart:sstart + s])
            sstart += s

        # Initialise the covariate arrays, if given
        # -----------------------------------------
        if covariates is not None:
            self.covariates = covariates
            for cv in self.covariates:
                cv[:, 1:] = (cv[:, 1:] - cv[:, 1:].mean(0)) / cv[:, 1:].ptp(0)
            self.ncovs = self.covariates[0].shape[1]
            self.covsize = array([c.size for c in self.covariates])
            self.covstart = concatenate([[0], self.covsize.cumsum()[:-1]])
            self.cova = concatenate(self.covariates)

    def _init_parameters(self):
        self.ps = ParameterSet()
        self._init_p_orbit()
        self._init_p_planet()
        self._init_p_limb_darkening()
        self._init_p_baseline()
        self._init_p_noise()
        self.ps.freeze()

    def _init_p_orbit(self):
        """Orbit parameter initialisation.
        """
        porbit = [
            GParameter('tc',  'zero_epoch',       'd',      N(0.0,  0.1), (-inf, inf)),
            GParameter('pr',  'period',           'd',      N(1.0, 1e-5), (0,    inf)),
            GParameter('rho', 'stellar_density',  'g/cm^3', U(0.1, 25.0), (0,    inf)),
            GParameter('b',   'impact_parameter', 'R_s',    U(0.0,  1.0), (0,      1))]
        self.ps.add_global_block('orbit', porbit)

    def _init_p_planet(self):
        """Planet parameter initialisation.
        """
        pk2 = [PParameter('k2', 'area_ratio', 'A_s', GM(0.1), (0.01**2, 0.55**2))]
        self.ps.add_passband_block('k2', 1, 1, pk2)
        self._pid_k2 = repeat(self.ps.blocks[-1].start, self.npb)
        self._start_k2 = self.ps.blocks[-1].start
        self._sl_k2 = self.ps.blocks[-1].slice

    def _init_p_limb_darkening(self):
        """Limb darkening parameter initialisation.
        """
        pld = concatenate([
            [PParameter('q1_{:d}'.format(i), 'q1_coefficient', '', U(0, 1), bounds=(0, 1)),
             PParameter('q2_{:d}'.format(i), 'q2_coefficient', '', U(0, 1), bounds=(0, 1))]
            for i in range(self.npb)])
        self.ps.add_passband_block('ldc', 2, self.npb, pld)
        self._sl_ld = self.ps.blocks[-1].slice
        self._start_ld = self.ps.blocks[-1].start

    def _init_p_baseline(self):
        """Baseline parameter initialisation.
        """
        pass

    def _init_p_noise(self):
        """Noise parameter initialisation.
        """
        pns = [LParameter('lne_{:d}'.format(i), 'log_error_{:d}'.format(i), '', U(-8, -0), bounds=(-8, -0)) for i in range(self.nlc)]
        self.ps.add_lightcurve_block('log_err', 1, self.nlc, pns)
        self._sl_err = self.ps.blocks[-1].slice
        self._start_err = self.ps.blocks[-1].start

    def _init_instrument(self):
        pass

    def create_pv_population(self, npop=50):
        pvp = self.ps.sample_from_prior(npop)
        for sl in self.ps.blocks[1].slices:
            pvp[:,sl] = uniform(0.01**2, 0.25**2, size=(npop, 1))

        # With LDTk
        # ---------
        #
        # Use LDTk to create the sample if LDTk has been initialised.
        #
        if self.ldps:
            istart = self._start_ld
            cms, ces = self.ldps.coeffs_tq()
            for i, (cm, ce) in enumerate(zip(cms.flat, ces.flat)):
                pvp[:, i + istart] = normal(cm, ce, size=pvp.shape[0])

        # No LDTk
        # -------
        #
        # Ensure that the total limb darkening decreases towards
        # red passbands.
        #
        else:
            ldsl = self._sl_ld
            for i in range(pvp.shape[0]):
                pid = argsort(pvp[i, ldsl][::2])[::-1]
                pvp[i, ldsl][::2] = pvp[i, ldsl][::2][pid]
                pvp[i, ldsl][1::2] = pvp[i, ldsl][1::2][pid]

        # Estimate white noise from the data
        # ----------------------------------
        for i in range(self.nlc):
            wn = diff(self.fluxes[i]).std() / sqrt(2)
            pvp[:, self._start_err + i] = log10(uniform(0.5*wn, 2*wn, size=npop))
        return pvp

    def baseline(self, pv):
        """Multiplicative baseline"""
        return 1.

    def trends(self, pv):
        """Additive trends"""
        return 0.

    def transit_model(self, pv):
        pv = atleast_2d(pv)
        pvp = map_pv(pv)
        ldc = map_ldc(pv[:,self._sl_ld])
        flux = self.tm.evaluate_pv(pvp, ldc)
        return flux

    def flux_model(self, pv):
        baseline    = self.baseline(pv)
        trends      = self.trends(pv)
        model_flux = self.transit_model(pv)
        return baseline * model_flux + trends

    def residuals(self, pv):
        return self.ofluxa - self.flux_model(pv)

    def set_prior(self, pid: int, prior) -> None:
        self.ps[pid].prior = prior

    def add_t14_prior(self, mean: float, std: float) -> None:
        """Add a normal prior on the transit duration.

        Parameters
        ----------
        mean
        std

        Returns
        -------

        """
        def T14(pv):
            a = as_from_rhop(pv[2], pv[1])
            t14 = duration_eccentric(pv[1], sqrt(pv[4]), a, mt.acos(pv[3] / a), 0, 0, 1)
            return norm.logpdf(t14, mean, std)
        self.lnpriors.append(T14)

    def add_as_prior(self, mean: float, std: float) -> None:
        """Add a prior on the scaled semi-major axis

        Parameters
        ----------
        mean
        std

        Returns
        -------

        """
        def as_prior(pv):
            a = as_from_rhop(pv[2], pv[1])
            return norm.logpdf(a, mean, std)
        self.lnpriors.append(as_prior)

    def add_ldtk_prior(self, teff: tuple, logg: tuple, z: tuple,
                       uncertainty_multiplier: float = 3,
                       pbs: tuple = ('g', 'r', 'i', 'z')) -> None:
        """Add a LDTk-based prior on the limb darkening.

        Parameters
        ----------
        teff
        logg
        z
        uncertainty_multiplier
        pbs

        Returns
        -------

        """
        fs = {n: f for n, f in zip('g r i z'.split(), (sdss_g, sdss_r, sdss_i, sdss_z))}
        filters = [fs[k] for k in pbs]
        self.ldsc = LDPSetCreator(teff, logg, z, filters)
        self.ldps = self.ldsc.create_profiles(1000)
        self.ldps.resample_linear_z()
        self.ldps.set_uncertainty_multiplier(uncertainty_multiplier)
        def ldprior(pv):
            return self.ldps.lnlike_tq(pv[self._sl_ld])
        self.lnpriors.append(ldprior)

    def remove_outliers(self, sigma=5):
        fmodel = self.flux_model(self.de.minimum_location)
        times, fluxes, pbids, errors = [], [], [], []
        for i in range(len(self.times)):
            res = self.fluxes[i] - fmodel[i]
            mask = ~sigma_clip(res, sigma=sigma).mask
            times.append(self.times[i][mask])
            fluxes.append(self.fluxes[i][mask])
            if self.errors is not None:
                errors.append(self.errors[i][mask])
        self._init_data(times, fluxes, self.pbids, errors=(errors if self.errors is not None else None))

    def remove_transits(self, tids):
        m = ones(len(self.times), bool)
        m[tids] = False
        self._init_data(self.times[m], self.fluxes[m], self.pbids[m],
                        self.covariates[m] if self.covariates is not None else None,
                        self.errors[m], self.nsamples[m], self.exptimes[m])
        self._init_parameters()

    def lnprior(self, pv):
        return self.ps.lnprior(pv) + self.additional_priors(pv)

    def additional_priors(self, pv):
        """Additional priors."""
        pv = atleast_2d(pv)
        return sum([f(pv) for f in self.lnpriors], 0)

    def lnlikelihood(self, pv):
        flux_m = self.flux_model(pv)
        wn = 10**(atleast_2d(pv)[:,self._sl_err])
        return lnlike_normal_v(self.ofluxa, flux_m, wn, self.lcids)

    def lnposterior(self, pv):
        lnp = self.lnprior(pv) + self.lnlikelihood(pv)
        return where(isfinite(lnp), lnp, -inf)

    def __call__(self, pv):
        return self.lnposterior(pv)

    def optimize_global(self, niter=200, npop=50, population=None, label='Global optimisation', leave=False):
        if self.de is None:
            self.de = DiffEvol(self.lnposterior, clip(self.ps.bounds, -1, 1), npop, maximize=True, vectorize=True)
            if population is None:
                self.de._population[:, :] = self.create_pv_population(npop)
            else:
                self.de._population[:,:] = population
        for _ in tqdm(self.de(niter), total=niter, desc=label, leave=leave):
            pass

    def sample_mcmc(self, niter=500, thin=5, label='MCMC sampling', reset=False, leave=True):
        if self.sampler is None:
            self.sampler = EnsembleSampler(self.de.n_pop, self.de.n_par, self.lnposterior, vectorize=True)
            pop0 = self.de.population
        else:
            pop0 = self.sampler.chain[:,-1,:].copy()
        if reset:
            self.sampler.reset()
        for _ in tqdm(self.sampler.sample(pop0, iterations=niter, thin=thin), total=niter, desc=label, leave=False):
            pass

    def posterior_samples(self, burn: int=0, thin: int=1, include_ldc: bool=False):
        ldstart = self._sl_ld.start
        fc = self.sampler.chain[:, burn::thin, :].reshape([-1, self.de.n_par])
        d = fc if include_ldc else fc[:, :ldstart]
        n = self.ps.names if include_ldc else self.ps.names[:ldstart]
        return pd.DataFrame(d, columns=n)

    def plot_mcmc_chains(self, pid: int=0, alpha: float=0.1, thin: int=1, ax=None):
        fig, ax = (None, ax) if ax is not None else subplots()
        ax.plot(self.sampler.chain[:, ::thin, pid].T, 'k', alpha=alpha)
        if fig is not None:
            fig.tight_layout()
        return fig


    def __repr__(self):
        s  = f"""Target: {self.name}
  LPF: {self._lpf_name}
  Passbands: {self.passbands}"""
        return s
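
The intended optimise-then-sample workflow for this class, sketched with hypothetical times, fluxes, and pbids lists shaped as _init_data expects:

    lpf = BaseLPF('demo', passbands=['g', 'i'], times=times, fluxes=fluxes, pbids=pbids)
    lpf.optimize_global(niter=200, npop=50)   # differential evolution
    lpf.sample_mcmc(niter=1000, thin=10)      # emcee ensemble sampling
    df = lpf.posterior_samples(burn=100)      # DataFrame of posterior samples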
Example #13
ug = np.sqrt(((const.G.si / const.R_sun.si**2)*umass*const.M_sun.si)**2+((-2*const.G.si*const.M_sun.si / const.R_sun.si**3)*uradius*const.R_sun.si)**2)

#g = [(const.G.si * (mass + j) * const.M_sun.si / (((radius +i) * const.R_sun.si)**2)).cgs.value for i in np.linspace(-uradius,uradius,25) for j in np.linspace(-umass,umass,25)]

g = [(const.G.si * (mass + j) * const.M_sun.si / (((radius +i) * const.R_sun.si)**2)).cgs.value for i in [-uradius,uradius] for j in [-umass,umass]]

#Now for LDCs
kep_trans = np.loadtxt('/Users/jlothrin/LDCs/kepler_response_lowres1.txt',delimiter='\t',skiprows=9)

waves = kep_trans[:,0] * 1000.

throughs = kep_trans[:,1]

filters = [ldtk.TabulatedFilter('kepler',waves,throughs)]

sc = LDPSetCreator(teff=[3390,150],logg=[4.9,0.55],z=[-0.25,0.20],filters=filters)

ps = sc.create_profiles(nsamples=500)

plot(ps._mu,ps.profile_averages[0],'k',label='Model Intensity')
plot(ps._mu,ps.profile_averages[0],'ko',label='Sampled Mu')
oldmu = ps._mu
oldprof = ps.profile_averages[0]

ps.set_limb_z_try(np.sqrt(1-np.min(ps._mu)**2))

qc,qe = ps.coeffs_qd(do_mc=True)
qc,qm = ps.coeffs_qd(do_mc=True,return_cm=True)
nlc,nle = ps.coeffs_nl(do_mc=True)
lnc,lne = ps.coeffs_ln(do_mc=True)
Example #14
def createldgrid(minmu,
                 maxmu,
                 orbp,
                 ldmodel='nonlinear',
                 phoenixmin=1e-1,
                 segmentation=int(10),
                 verbose=False):
    '''
    G. ROUDIER: Wrapper around LDTK downloading tools
    LDTK: Parviainen et al. https://github.com/hpparvi/ldtk
    '''
    tstar = orbp['T*']
    terr = np.sqrt(abs(orbp['T*_uperr'] * orbp['T*_lowerr']))
    fehstar = orbp['FEH*']
    feherr = np.sqrt(abs(orbp['FEH*_uperr'] * orbp['FEH*_lowerr']))
    loggstar = orbp['LOGG*']
    loggerr = np.sqrt(abs(orbp['LOGG*_uperr'] * orbp['LOGG*_lowerr']))
    log.warning('>-- Temperature: %s +/- %s', str(tstar), str(terr))
    log.warning('>-- Metallicity: %s +/- %s', str(fehstar), str(feherr))
    log.warning('>-- Surface Gravity: %s +/- %s', str(loggstar), str(loggerr))
    niter = int(len(minmu) / segmentation) + 1
    allcl = None
    allel = None
    out = {}
    avmu = [np.mean([mm, xm]) for mm, xm in zip(minmu, maxmu)]
    for i in np.arange(niter):
        loweri = i * segmentation
        upperi = (i + 1) * segmentation
        if i == (niter - 1): upperi = len(avmu)
        munm = 1e3 * np.array(avmu[loweri:upperi])
        munmmin = 1e3 * np.array(minmu[loweri:upperi])
        munmmax = 1e3 * np.array(maxmu[loweri:upperi])
        filters = [
            BoxcarFilter(str(mue), mun, mux)
            for mue, mun, mux in zip(munm, munmmin, munmmax)
        ]
        sc = LDPSetCreator(teff=(tstar, terr),
                           logg=(loggstar, loggerr),
                           z=(fehstar, feherr),
                           filters=filters)
        ps = sc.create_profiles(nsamples=int(1e4))
        itpfail = False
        for testprof in ps.profile_averages:
            if np.all(~np.isfinite(testprof)): itpfail = True
            pass
        nfail = 1e0
        while itpfail:
            nfail *= 2
            sc = LDPSetCreator(teff=(tstar, nfail * terr),
                               logg=(loggstar, loggerr),
                               z=(fehstar, feherr),
                               filters=filters)
            ps = sc.create_profiles(nsamples=int(1e4))
            itpfail = False
            for testprof in ps.profile_averages:
                if np.all(~np.isfinite(testprof)): itpfail = True
                pass
            pass
        cl, el = ldx(ps.profile_mu,
                     ps.profile_averages,
                     ps.profile_uncertainties,
                     mumin=phoenixmin,
                     debug=verbose,
                     model=ldmodel)
        if allcl is None: allcl = cl
        else: allcl = np.concatenate((allcl, cl), axis=0)
        if allel is None: allel = el
        else: allel = np.concatenate((allel, el), axis=0)
        pass
    allel[allel > 1.] = 0.
    allel[~np.isfinite(allel)] = 0.
    out['MU'] = avmu
    out['LD'] = allcl.T
    out['ERR'] = allel.T
    for i in range(0, len(allcl.T)):
        log.warning('>-- LD%s: %s +/- %s', str(int(i)), str(float(allcl.T[i])),
                    str(float(allel.T[i])))
        pass
    return out
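
A minimal input sketch: minmu and maxmu are bandpass edges in microns (the function scales them by 1e3 to nm), and orbp must provide the starred keys read above. The function also relies on module-level log and ldx helpers that are not part of this snippet; all values below are hypothetical:

    orbp = {'T*': 5750., 'T*_uperr': 100., 'T*_lowerr': -90.,
            'FEH*': 0.0, 'FEH*_uperr': 0.1, 'FEH*_lowerr': -0.1,
            'LOGG*': 4.5, 'LOGG*_uperr': 0.05, 'LOGG*_lowerr': -0.05}
    grid = createldgrid(minmu=[0.55, 0.70], maxmu=[0.70, 0.85], orbp=orbp)
    # grid['MU'], grid['LD'], grid['ERR']: band centres, coefficients, errors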
Example #15
class BaseLPF:
    _lpf_name = 'BaseLPF'

    def __init__(self, name: str, passbands: list, times: list = None, fluxes: list = None, errors: list = None,
                 pbids: list = None, covariates: list = None, wnids: list = None, tm: TransitModel = None,
                 nsamples: tuple = 1, exptimes: tuple = 0., init_data=True, result_dir: Path = None):
        """The base Log Posterior Function class.

        The `BaseLPF` class creates the basis for transit light curve analyses using `PyTransit`. This class can be
        used in a basic analysis directly, or it can be inherited to create a basis for a more complex analysis.

        Parameters
        ----------
        name: str
            Name of the log posterior function instance.

        passbands: iterable
            List of unique passband names (filters) that the light curves have been observed in.

        times: iterable
            List of 1d ndarrays each containing the mid-observation times for a single light curve.

        fluxes: iterable
            List of 1d ndarrays each containing the normalized fluxes for a single light curve.

        errors: iterable
            List of 1d ndarrays each containing the flux measurement uncertainties for a single light curve.

        pbids: iterable of ints
            List of passband indices mapping each light curve to a single passband.

        covariates: iterable
            List of covariates, one 2d ndarray per light curve.

        wnids: iterable of ints
            List of noise set indices mapping each light curve to a single noise set.

        tm: TransitModel
            Transit model to use instead of the default model.

        nsamples: list[int]
            List of supersampling factors.  The values should be integers and given one per light curve.

        exptimes: list[float]
            List of exposure times. The values should be floats with the time given in days.

        init_data: bool
            Set to `False` to allow the LPF to be initialized without data. This is mainly for debugging.

        result_dir: Path
            Default saving directory
        """

        self._pre_initialisation()

        self.tm = tm or QuadraticModel(klims=(0.01, 0.75), nk=512, nz=512)

        # LPF name
        # --------
        self.name = name
        self.result_dir = result_dir

        # Passbands
        # ---------
        # Should be arranged from blue to red
        if isinstance(passbands, (list, tuple, ndarray)):
            self.passbands = passbands
        else:
            self.passbands = [passbands]
        self.npb = npb = len(self.passbands)

        self.nsamples = None
        self.exptimes = None

        # Declare high-level objects
        # --------------------------
        self.ps = None          # Parametrisation
        self.de = None          # Differential evolution optimiser
        self.sampler = None     # MCMC sampler
        self.instrument = None  # Instrument
        self.ldsc = None        # Limb darkening set creator
        self.ldps = None        # Limb darkening profile set
        self.cntm = None        # Contamination model

        # Declare data arrays and variables
        # ---------------------------------
        self.nlc: int = 0                # Number of light curves
        self.n_noise_blocks: int = 0     # Number of noise blocks
        self.noise_ids = None
        self.times: list = None          # List of time arrays
        self.fluxes: list = None         # List of flux arrays
        self.errors: list = None         # List of flux uncertainties
        self.covariates: list = None     # List of covariates
        self.wn: ndarray = None          # Array of white noise estimates for each light curve
        self.timea: ndarray = None       # Array of concatenated times
        self.mfluxa: ndarray = None      # Array of concatenated model fluxes
        self.ofluxa: ndarray = None      # Array of concatenated observed fluxes
        self.errora: ndarray = None      # Array of concatenated flux uncertainties

        self.lcids: ndarray = None       # Array of light curve indices for each datapoint
        self.pbids: ndarray = None       # Array of passband indices for each light curve
        self.lcslices: list = None       # List of light curve slices

        self._local_minimization = None

        # Initialise the additional lnprior list
        # --------------------------------------
        self.lnpriors = []

        if init_data:
            # Set up the observation data
            # ---------------------------
            self._init_data(times = times, fluxes = fluxes, pbids = pbids, covariates = covariates,
                            errors = errors, wnids = wnids, nsamples = nsamples, exptimes = exptimes)

            # Set up the parametrisation
            # --------------------------
            self._init_parameters()

            # Initialise the instrument
            # --------------------------
            self._init_instrument()

        self._post_initialisation()


    def _init_data(self, times, fluxes, pbids=None, covariates=None, errors=None, wnids = None, nsamples=1, exptimes=0.):

        if isinstance(times, ndarray) and times.ndim == 1 and times.dtype == float:
            times = [times]

        if isinstance(fluxes, ndarray) and fluxes.ndim == 1 and fluxes.dtype == float:
            fluxes = [fluxes]

        if pbids is None:
            if self.pbids is None:
                self.pbids = zeros(len(fluxes), int)
        else:
            self.pbids = atleast_1d(pbids).astype('int')

        self.nlc = len(times)
        self.times = times
        self.fluxes = fluxes
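        # First-difference white-noise estimate: for uncorrelated noise,
        # std(diff(f)) equals sqrt(2) times the per-point scatter.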
        self.wn = [nanstd(diff(f)) / sqrt(2) for f in fluxes]
        self.timea = concatenate(self.times)
        self.ofluxa = concatenate(self.fluxes)
        self.mfluxa = zeros_like(self.ofluxa)
        self.lcids = concatenate([full(t.size, i) for i, t in enumerate(self.times)])

        # TODO: Noise IDs get scrambled when removing transits, fix!!!
        if wnids is None:
            if self.noise_ids is None:
                self.noise_ids = zeros(self.nlc, int)
                self.n_noise_blocks = 1
        else:
            self.noise_ids = asarray(wnids)
            self.n_noise_blocks = len(unique(self.noise_ids))
            assert self.noise_ids.size == self.nlc, "Need one noise block id per light curve."
            assert self.noise_ids.max() == self.n_noise_blocks - 1, "Error initialising noise block ids."

        if isscalar(nsamples):
            self.nsamples = full(self.nlc, nsamples)
            self.exptimes = full(self.nlc, exptimes)
        else:
            assert (len(nsamples) == self.nlc) and (len(exptimes) == self.nlc)
            self.nsamples = asarray(nsamples, 'int')
            self.exptimes = asarray(exptimes)

        self.tm.set_data(self.timea, self.lcids, self.pbids, self.nsamples, self.exptimes)

        # Keep the uncertainties as a list of arrays; the light curves may differ in length
        if errors is None:
            self.errors = [full(t.size, nan) for t in self.times]
        else:
            self.errors = [atleast_1d(e) for e in errors]
        self.errora = concatenate(self.errors)

        # Initialise the light curves slices
        # ----------------------------------
        self.lcslices = []
        sstart = 0
        for i in range(self.nlc):
            s = self.times[i].size
            self.lcslices.append(s_[sstart:sstart + s])
            sstart += s

        # Initialise the covariate arrays, if given
        # -----------------------------------------
        if covariates is not None:
            # Standardise each covariate array to zero mean and unit variance;
            # reassigning the loop variable would leave the stored arrays untouched.
            self.covariates = [(cv - cv.mean(0)) / cv.std(0) for cv in covariates]
            #self.ncovs = self.covariates[0].shape[1]
            #self.covsize = array([c.size for c in self.covariates])
            #self.covstart = concatenate([[0], self.covsize.cumsum()[:-1]])
            #self.cova = concatenate(self.covariates)

    def print_parameters(self, columns: int = 2):
        columns = max(1, columns)
        for i, p in enumerate(self.ps):
            print(p.__repr__(), end=('\n' if i % columns == columns - 1 else '\t'))

    def _init_parameters(self):
        self.ps = ParameterSet()
        self._init_p_orbit()
        self._init_p_planet()
        self._init_p_limb_darkening()
        self._init_p_baseline()
        self._init_p_noise()
        self.ps.freeze()

    def _init_p_orbit(self):
        """Orbit parameter initialisation.
        """
        porbit = [
            GParameter('tc',  'zero_epoch',       'd',      N(0.0,  0.1), (-inf, inf)),
            GParameter('p',   'period',           'd',      N(1.0, 1e-5), (0,    inf)),
            GParameter('rho', 'stellar_density',  'g/cm^3', U(0.1, 25.0), (0,    inf)),
            GParameter('b',   'impact_parameter', 'R_s',    U(0.0,  1.0), (0,      1))]
        self.ps.add_global_block('orbit', porbit)

    def _init_p_planet(self):
        """Planet parameter initialisation.
        """
        pk2 = [PParameter('k2', 'area_ratio', 'A_s', GM(0.1), (0.01**2, 0.75**2))]
        self.ps.add_passband_block('k2', 1, 1, pk2)
        self._pid_k2 = repeat(self.ps.blocks[-1].start, self.npb)
        self._start_k2 = self.ps.blocks[-1].start
        self._sl_k2 = self.ps.blocks[-1].slice

    def _init_p_limb_darkening(self):
        """Limb darkening parameter initialisation.
        """
        pld = concatenate([
            [PParameter('q1_{:d}'.format(i), 'q1_coefficient', '', U(0, 1), bounds=(0, 1)),
             PParameter('q2_{:d}'.format(i), 'q2_coefficient', '', U(0, 1), bounds=(0, 1))]
            for i in range(self.npb)])
        self.ps.add_passband_block('ldc', 2, self.npb, pld)
        self._sl_ld = self.ps.blocks[-1].slice
        self._start_ld = self.ps.blocks[-1].start

    def _init_p_baseline(self):
        """Baseline parameter initialisation.
        """
        self._sl_bl = None

    def _init_p_noise(self):
        """Noise parameter initialisation.
        """
        pns = [LParameter('loge_{:d}'.format(i), 'log10_error_{:d}'.format(i), '', U(-4, 0), bounds=(-4, 0)) for i in range(self.n_noise_blocks)]
        self.ps.add_lightcurve_block('log_err', 1, self.n_noise_blocks, pns)
        self._sl_err = self.ps.blocks[-1].slice
        self._start_err = self.ps.blocks[-1].start

    def _init_instrument(self):
        pass

    def _pre_initialisation(self):
        pass

    def _post_initialisation(self):
        pass

    def create_pv_population(self, npop=50):
        pvp = self.ps.sample_from_prior(npop)
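        # Overwrite the prior-drawn area ratios with uniform draws, one
        # shared value per population member across each k2 block.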
        for sl in self.ps.blocks[1].slices:
            pvp[:,sl] = uniform(0.01**2, 0.25**2, size=(npop, 1))

        # With LDTk
        # ---------
        #
        # Use LDTk to create the sample if LDTk has been initialised.
        #
        if self.ldps:
            istart = self._start_ld
            cms, ces = self.ldps.coeffs_tq()
            for i, (cm, ce) in enumerate(zip(cms.flat, ces.flat)):
                pvp[:, i + istart] = normal(cm, ce, size=pvp.shape[0])

        # No LDTk
        # -------
        #
        # Ensure that the total limb darkening decreases towards
        # red passbands.
        #
        else:
            ldsl = self._sl_ld
            for i in range(pvp.shape[0]):
                pid = argsort(pvp[i, ldsl][::2])[::-1]
                pvp[i, ldsl][::2] = pvp[i, ldsl][::2][pid]
                pvp[i, ldsl][1::2] = pvp[i, ldsl][1::2][pid]

        # Estimate white noise from the data
        # ----------------------------------
        for i in range(self.nlc):
            wn = diff(self.ofluxa).std() / sqrt(2)
            pvp[:, self._start_err] = log10(uniform(0.5*wn, 2*wn, size=npop))
        return pvp

    def baseline(self, pv):
        """Multiplicative baseline"""
        return 1.

    def trends(self, pv):
        """Additive trends"""
        return 0.

    def transit_model(self, pv, copy=True):
        pv = atleast_2d(pv)
        pvp = map_pv(pv)
        ldc = map_ldc(pv[:,self._sl_ld])
        flux = self.tm.evaluate_pv(pvp, ldc, copy)
        return flux

    def flux_model(self, pv):
        baseline    = self.baseline(pv)
        trends      = self.trends(pv)
        model_flux = self.transit_model(pv)
        return baseline * model_flux + trends

    def residuals(self, pv):
        return self.ofluxa - self.flux_model(pv)

    def set_prior(self, parameter, prior, *nargs) -> None:
        if isinstance(parameter, str):
            descriptions = self.ps.descriptions
            names = self.ps.names
            if parameter in descriptions:
                parameter = descriptions.index(parameter)
            elif parameter in names:
                parameter = names.index(parameter)
            else:
                params = ', '.join([f"{ln} ({sn})" for ln, sn in zip(self.ps.descriptions, self.ps.names)])
                raise ValueError(f'Parameter "{parameter}" not found from the parameter set: {params}')

        if isinstance(prior, str):
            if prior.lower() in ['n', 'np', 'normal']:
                prior = N(nargs[0], nargs[1])
            elif prior.lower() in ['u', 'up', 'uniform']:
                prior = U(nargs[0], nargs[1])
            else:
                raise ValueError(f'Unknown prior "{prior}". Allowed values are (N)ormal and (U)niform.')

        self.ps[parameter].prior = prior

    def set_radius_ratio_prior(self, kmin, kmax):
        for p in self.ps[self._sl_k2]:
            p.prior = U(kmin ** 2, kmax ** 2)
            p.bounds = [kmin ** 2, kmax ** 2]
        self.ps.thaw()
        self.ps.freeze()

    def add_t14_prior(self, mean: float, std: float) -> None:
        """Add a normal prior on the transit duration.

        Parameters
        ----------
        mean: float
            Mean of the normal distribution
        std: float
            Standard deviation of the normal distribution.
        """

        def T14(pv):
            pv = atleast_2d(pv)
            a = as_from_rhop(pv[:, 2], pv[:, 1])
            t14 = duration_eccentric(pv[:, 1], sqrt(pv[:, 4]), a, arccos(pv[:, 3] / a), 0, 0, 1)
            return norm.logpdf(t14, mean, std)

        self.lnpriors.append(T14)

    def add_as_prior(self, mean: float, std: float) -> None:
        """Add a normal prior on the scaled semi-major axis :math:`(a / R_\star)`.

        Parameters
        ----------
        mean: float
            Mean of the normal distribution.
        std: float
            Standard deviation of the normal distribution
        """
        def as_prior(pv):
            a = as_from_rhop(pv[2], pv[1])
            return norm.logpdf(a, mean, std)
        self.lnpriors.append(as_prior)

    def add_ldtk_prior(self, teff: tuple, logg: tuple, z: tuple, passbands: tuple,
                       uncertainty_multiplier: float = 3, **kwargs) -> None:
        """Add a LDTk-based prior on the limb darkening.

        Parameters
        ----------
        teff: tuple
            Host star effective temperature as a (value, uncertainty) pair [K].
        logg: tuple
            Host star log10 surface gravity (cgs) as a (value, uncertainty) pair.
        z: tuple
            Host star metallicity as a (value, uncertainty) pair.
        passbands: tuple
            Sequence of LDTk Filter instances, one per passband.
        uncertainty_multiplier: float
            Factor used to inflate the LDTk-estimated coefficient uncertainties.
        """
        if 'pbs' in kwargs.keys():
            raise DeprecationWarning("The 'pbs' argument has been renamed to 'passbands'")

        if isinstance(passbands[0], str):
            raise DeprecationWarning(
                'Passing passbands by name has been deprecated, they should be now Filter instances.')

        self.ldsc = LDPSetCreator(teff, logg, z, list(passbands))
        self.ldps = self.ldsc.create_profiles(1000)
        self.ldps.resample_linear_z()
        self.ldps.set_uncertainty_multiplier(uncertainty_multiplier)

        def ldprior(pv):
            return self.ldps.lnlike_tq(pv[:, self._sl_ld].reshape([pv.shape[0], -1, 2]))

        self.lnpriors.append(ldprior)


    def remove_outliers(self, sigma=5):
        fmodel = squeeze(self.flux_model(self.de.minimum_location))
        covariates = [] if self.covariates is not None else None
        times, fluxes, lcids, errors = [], [], [], []
        for i in range(len(self.times)):
            res = self.fluxes[i] - fmodel[self.lcslices[i]]
            mask = ~sigma_clip(res, sigma=sigma).mask
            times.append(self.times[i][mask])
            fluxes.append(self.fluxes[i][mask])
            if covariates is not None:
                covariates.append(self.covariates[i][mask])
            if self.errors is not None:
                errors.append(self.errors[i][mask])

        self._init_data(times=times, fluxes=fluxes, covariates=covariates, pbids=self.pbids,
                        errors=(errors if self.errors is not None else None), wnids=self.noise_ids,
                        nsamples=self.nsamples, exptimes=self.exptimes)


    def remove_transits(self, tids):
        m = ones(len(self.times), bool)
        m[tids] = False
        # The per-light-curve data are stored as lists, which cannot be
        # indexed with a boolean mask, so filter them element by element.
        times = [t for t, keep in zip(self.times, m) if keep]
        fluxes = [f for f, keep in zip(self.fluxes, m) if keep]
        errors = [e for e, keep in zip(self.errors, m) if keep]
        covariates = ([c for c, keep in zip(self.covariates, m) if keep]
                      if self.covariates is not None else None)
        self._init_data(times, fluxes, self.pbids[m], covariates,
                        errors, self.noise_ids[m], self.nsamples[m], self.exptimes[m])
        self._init_parameters()

    def lnprior(self, pv: ndarray) -> Union[Iterable,float]:
        """Log prior density for a 1D or 2D array of model parameters.

        Parameters
        ----------
        pv: ndarray
            Either a 1D parameter vector or a 2D parameter array.

        Returns
        -------
            Log prior density for the given parameter vector(s).
        """
        return self.ps.lnprior(pv) + self.additional_priors(pv)

    def additional_priors(self, pv):
        """Additional priors."""
        pv = atleast_2d(pv)
        return sum([f(pv) for f in self.lnpriors], 0)

    def lnlikelihood(self, pv):
        """Log likelihood for a 1D or 2D array of model parameters.

        Parameters
        ----------
        pv: ndarray
            Either a 1D parameter vector or a 2D parameter array.

        Returns
        -------
            Log likelihood for the given parameter vector(s).
        """
        flux_m = self.flux_model(pv)
        wn = 10**(atleast_2d(pv)[:,self._sl_err])
        return lnlike_normal_v(self.ofluxa, flux_m, wn, self.noise_ids, self.lcids)

    def lnposterior(self, pv):
        lnp = self.lnprior(pv) + self.lnlikelihood(pv)
        return where(isfinite(lnp), lnp, -inf)

    def __call__(self, pv):
        return self.lnposterior(pv)

    def optimize_global(self, niter=200, npop=50, population=None, label='Global optimisation', leave=False,
                        plot_convergence: bool = True, use_tqdm: bool = True):

        if self.de is None:
            self.de = DiffEvol(self.lnposterior, clip(self.ps.bounds, -1, 1), npop, maximize=True, vectorize=True)
            if population is None:
                self.de._population[:, :] = self.create_pv_population(npop)
            else:
                self.de._population[:,:] = population
        for _ in tqdm(self.de(niter), total=niter, desc=label, leave=leave, disable=(not use_tqdm)):
            pass

        if plot_convergence:
            fig, axs = subplots(1, 5, figsize=(13, 2), constrained_layout=True)
            rfit = self.de._fitness
            mfit = isfinite(rfit)

            if hasattr(self, '_old_de_fitness'):
                m = isfinite(self._old_de_fitness)
                axs[0].hist(-self._old_de_fitness[m], facecolor='midnightblue', bins=25, alpha=0.25)
            axs[0].hist(-rfit[mfit], facecolor='midnightblue', bins=25)

            for i, ax in zip([0, 2, 3, 4], axs[1:]):
                if hasattr(self, '_old_de_fitness'):
                    m = isfinite(self._old_de_fitness)
                    ax.plot(self._old_de_population[m, i], -self._old_de_fitness[m], 'kx', alpha=0.25)
                ax.plot(self.de.population[mfit, i], -rfit[mfit], 'k.')
                ax.set_xlabel(self.ps.descriptions[i])
            setp(axs, yticks=[])
            setp(axs[1], ylabel='Log posterior')
            setp(axs[0], xlabel='Log posterior')
            sb.despine(fig, offset=5)
        self._old_de_population = self.de.population.copy()
        self._old_de_fitness = self.de._fitness.copy()

    def optimize_local(self, pv0=None, method='powell'):
        if pv0 is None:
            if self.de is not None:
                pv0 = self.de.minimum_location
            else:
                pv0 = self.ps.mean_pv
                pv0[self._sl_err] = log10(self.wn)
        res = minimize(lambda pv: -self.lnposterior(pv), pv0, method=method)
        self._local_minimization = res

    def sample_mcmc(self, niter: int = 500, thin: int = 5, repeats: int = 1, npop: int = None, population=None,
                    label='MCMC sampling', reset=True, leave=True, save=False, use_tqdm: bool = True):

        if save and self.result_dir is None:
            raise ValueError('The MCMC sampler is set to save the results, but the result directory is not set.')

        if self.sampler is None:
            if population is not None:
                pop0 = population
            elif hasattr(self, '_local_minimization') and self._local_minimization is not None:
                pop0 = multivariate_normal(self._local_minimization.x, diag(full(len(self.ps), 0.001 ** 2)), size=npop)
            elif self.de is not None:
                pop0 = self.de.population.copy()
            else:
                raise ValueError('Sample MCMC needs an initial population.')
            self.sampler = EnsembleSampler(pop0.shape[0], pop0.shape[1], self.lnposterior, vectorize=True)
        else:
            pop0 = self.sampler.chain[:,-1,:].copy()

        for i in tqdm(range(repeats), desc='MCMC sampling', disable=(not use_tqdm)):
            if reset or i > 0:
                self.sampler.reset()
            for _ in tqdm(self.sampler.sample(pop0, iterations=niter, thin=thin), total=niter,
                          desc='Run {:d}/{:d}'.format(i+1, repeats), leave=False, disable=(not use_tqdm)):
                pass
            if save:
                self.save(self.result_dir)
            pop0 = self.sampler.chain[:,-1,:].copy()

    def posterior_samples(self, burn: int = 0, thin: int = 1, derived_parameters: bool = True):
        fc = self.sampler.chain[:, burn::thin, :].reshape([-1, self.de.n_par])
        df = pd.DataFrame(fc, columns=self.ps.names)
        if derived_parameters:
            for k2c in df.columns[self._sl_k2]:
                df[k2c.replace('k2', 'k')] = sqrt(df[k2c])
            df['a'] = as_from_rhop(df.rho.values, df.p.values)
            df['inc'] = i_from_baew(df.b.values, df.a.values, 0., 0.)

            average_ks = sqrt(df.iloc[:, self._sl_k2]).mean(1).values
            df['t14'] = d_from_pkaiews(df.p.values, average_ks, df.a.values, df.inc.values, 0., 0., 1)
        return df

    def plot_mcmc_chains(self, pid: int=0, alpha: float=0.1, thin: int=1, ax=None):
        fig, ax = (None, ax) if ax is not None else subplots()
        ax.plot(self.sampler.chain[:, ::thin, pid].T, 'k', alpha=alpha)
        if fig is not None:
            fig.tight_layout()
        return fig

    def save(self, save_path: Path = '.'):
        save_path = Path(save_path)

        if self.de:
            de = xa.DataArray(self.de.population, dims='pvector name'.split(), coords={'name': self.ps.names})
        else:
            de = None

        if self.sampler is not None:
            mc = xa.DataArray(self.sampler.chain, dims='pvector step name'.split(),
                              coords={'name': self.ps.names}, attrs={'ndim': self.de.n_par, 'npop': self.de.n_pop})
        else:
            mc = None

        ds = xa.Dataset(data_vars={'de_population_lm': de, 'lm_mcmc': mc},
                        attrs={'created': strftime('%Y-%m-%d %H:%M:%S'), 'target': self.name})
        ds.to_netcdf(save_path.joinpath(f'{self.name}.nc'))

    def plot_light_curves(self, method='de', ncol: int = 3, width: float = 2., max_samples: int = 1000, figsize=None,
                          data_alpha=0.5, ylim=None):
        nrow = int(ceil(self.nlc / ncol))
        if method == 'mcmc':
            df = self.posterior_samples(derived_parameters=False)
            t0, p = df.tc.median(), df.p.median()
            fmodel = self.flux_model(permutation(df.values)[:max_samples])
            fmperc = percentile(fmodel, [50, 16, 84, 2.5, 97.5], 0)
        else:
            fmodel = squeeze(self.flux_model(self.de.minimum_location))
            t0, p = self.de.minimum_location[0], self.de.minimum_location[1]
            fmperc = None

        fig, axs = subplots(nrow, ncol, figsize=figsize, constrained_layout=True, sharey='all', sharex='all',
                            squeeze=False)
        for i in range(self.nlc):
            ax = axs.flat[i]
            e = epoch(self.times[i].mean(), t0, p)
            tc = t0 + e * p
            time = self.times[i] - tc

            ax.plot(time, self.fluxes[i], '.', alpha=data_alpha)

            if method == 'de':
                ax.plot(time, fmodel[self.lcslices[i]], 'w', lw=4)
                ax.plot(time, fmodel[self.lcslices[i]], 'k', lw=1)
            else:
                ax.fill_between(time, *fmperc[3:5, self.lcslices[i]], alpha=0.15)
                ax.fill_between(time, *fmperc[1:3, self.lcslices[i]], alpha=0.25)
                ax.plot(time, fmperc[0, self.lcslices[i]])

            setp(ax, xlabel=f'Time - T$_c$ [d]', xlim=(-width / 2 / 24, width / 2 / 24))
        setp(axs[:, 0], ylabel='Normalised flux')

        if ylim is not None:
            setp(axs, ylim=ylim)

        for ax in axs.flat[self.nlc:]:
            ax.remove()
        return fig

    def __repr__(self):
        return f"Target: {self.name}\nLPF: {self._lpf_name}\n Passbands: {self.passbands}"
Example #16
class M2LPF(BaseLPF):
    def __init__(self,
                 target: str,
                 photometry: list,
                 tid: int,
                 cids: list,
                 filters: tuple,
                 aperture_lims: tuple = (0, inf),
                 use_opencl: bool = False,
                 n_legendre: int = 0,
                 use_toi_info=True,
                 with_transit=True,
                 with_contamination=False,
                 radius_ratio: str = 'achromatic'):
        assert radius_ratio in ('chromatic', 'achromatic')

        self.use_opencl = use_opencl
        self.planet = None

        self.photometry_frozen = False
        self.with_transit = with_transit
        self.with_contamination = with_contamination
        self.chromatic_transit = radius_ratio == 'chromatic'
        self.radius_ratio = radius_ratio
        self.n_legendre = n_legendre

        self.toi = None
        if self.with_transit and 'toi' in target.lower() and use_toi_info:
            self.toi = get_toi(float(target.lower().strip('toi')))

        # Set photometry
        # --------------
        self.phs = photometry
        self.nph = len(photometry)

        # Set the aperture ranges
        # -----------------------
        self.min_apt = amin = min(max(aperture_lims[0], 0),
                                  photometry[0].flux.aperture.size)
        self.max_apt = amax = max(
            min(aperture_lims[1], photometry[0].flux.aperture.size), 0)
        self.napt = amax - amin

        # Target and comparison star IDs
        # ------------------------------
        self.tid = atleast_1d(tid)
        if self.tid.size == 1:
            self.tid = tile(self.tid, self.nph)

        self.cids = atleast_2d(cids)
        if self.cids.shape[0] == 1:
            self.cids = tile(self.cids, (self.nph, 1))

        assert self.tid.size == self.nph
        assert self.cids.shape[0] == self.nph

        self.covnames = 'intercept sky airmass xshift yshift entropy'.split()

        times = [array(ph.bjd) for ph in photometry]
        fluxes = [
            array(ph.flux[:, tid, 1]) for tid, ph in zip(self.tid, photometry)
        ]
        fluxes = [f / nanmedian(f) for f in fluxes]
        self.apertures = ones(len(times)).astype('int')

        self.t0 = floor(times[0].min())
        times = [t - self.t0 for t in times]

        self._tmin = times[0].min()
        self._tmax = times[0].max()

        covariates = []
        for ph in photometry:
            covs = concatenate(
                [ones([ph._fmask.sum(), 1]),
                 array(ph.aux)[:, [1, 3, 4, 5]]], 1)
            covariates.append(covs)

        self.airmasses = [array(ph.aux[:, 2]) for ph in photometry]

        wns = [ones(ph.nframes) for ph in photometry]

        if use_opencl:
            import pyopencl as cl
            ctx = cl.create_some_context()
            queue = cl.CommandQueue(ctx)
            tm = QuadraticModelCL(klims=(0.005, 0.25),
                                  nk=512,
                                  nz=512,
                                  cl_ctx=ctx,
                                  cl_queue=queue)
        else:
            tm = QuadraticModel(interpolate=True,
                                klims=(0.005, 0.25),
                                nk=512,
                                nz=512)

        super().__init__(target,
                         filters,
                         times,
                         fluxes,
                         wns,
                         arange(len(photometry)),
                         covariates,
                         arange(len(photometry)),
                         tm=tm)

        self.legendre = [
            legvander((t - t.min()) / (0.5 * t.ptp()) - 1,
                      self.n_legendre)[:, 1:] for t in self.times
        ]

        # Create the target and reference star flux arrays
        # ------------------------------------------------
        self.ofluxes = [
            array(ph.flux[:, self.tid[i], amin:amax + 1] /
                  ph.flux[:, self.tid[i], amin:amax + 1].median('mjd'))
            for i, ph in enumerate(photometry)
        ]

        self.refs = []
        for ip, ph in enumerate(photometry):
            self.refs.append([
                pad(array(ph.flux[:, cid, amin:amax + 1]), ((0, 0), (1, 0)),
                    mode='constant') for cid in self.cids[ip]
            ])

        self.set_orbit_priors()

    def _init_parameters(self):
        self.ps = ParameterSet()
        if self.with_transit:
            self._init_p_orbit()
            self._init_p_planet()
            self._init_p_limb_darkening()
        self._init_p_baseline()
        self._init_p_noise()
        self.ps.freeze()

    def set_orbit_priors(self):
        if self.with_transit and self.toi is not None:
            tn = round((self.times[0].mean() - (self.toi.epoch[0] - self.t0)) /
                       self.toi.period[0])
            epoch = ufloat(*self.toi.epoch)
            period = ufloat(*self.toi.period)
            tc = epoch - self.t0 + tn * period
            self.set_prior(0, NP(tc.n, tc.s))
            self.set_prior(1, NP(*self.toi.period))
            self.add_t14_prior(self.toi.duration[0] / 24,
                               0.5 * self.toi.duration[1] / 24)

    def _init_p_planet(self):
        """Planet parameter initialisation.
        """
        if self.radius_ratio == 'achromatic':
            pk2 = [
                PParameter('k2', 'area_ratio', 'A_s', UP(0.005**2, 0.25**2),
                           (0.005**2, 0.25**2))
            ]
            self.ps.add_passband_block('k2', 1, 1, pk2)
            self._pid_k2 = repeat(self.ps.blocks[-1].start, self.npb)
        else:
            pk2 = [
                PParameter(f'k2_{pb}', f'area_ratio {pb}', 'A_s',
                           UP(0.005**2, 0.25**2), (0.005**2, 0.25**2))
                for pb in self.passbands
            ]
            self.ps.add_passband_block('k2', 1, self.npb, pk2)
            self._pid_k2 = arange(self.npb) + self.ps.blocks[-1].start
        self._start_k2 = self.ps.blocks[-1].start
        self._sl_k2 = self.ps.blocks[-1].slice

        if self.with_contamination:
            pcn = [
                PParameter('cnt_ref', 'Reference contamination', '',
                           UP(0., 1.), (0., 1.))
            ]
            pcn.extend([
                PParameter(f'cnt_{pb}', 'contamination', '', UP(-1., 1.),
                           (-1., 1.)) for pb in self.passbands[1:]
            ])
            self.ps.add_passband_block('contamination', 1, self.npb, pcn)
            self._pid_cn = arange(self.ps.blocks[-1].start,
                                  self.ps.blocks[-1].stop)
            self._sl_cn = self.ps.blocks[-1].slice

    def _init_p_baseline(self):
        c = []
        for ilc in range(self.nlc):
            c.append(
                LParameter(f'ci_{ilc:d}',
                           f'intercept_{ilc:d}',
                           '',
                           NP(1.0, 0.03),
                           bounds=(0.5, 1.5)))
            c.append(
                LParameter(f'cs_{ilc:d}',
                           f'sky_{ilc:d}',
                           '',
                           NP(0.0, 0.01),
                           bounds=(-0.5, 0.5)))
            c.append(
                LParameter(f'ca_{ilc:d}',
                           f'airmass_{ilc:d}',
                           '',
                           NP(0.0, 0.01),
                           bounds=(-0.5, 0.5)))
            c.append(
                LParameter(f'cx_{ilc:d}',
                           f'xshift_{ilc:d}',
                           '',
                           NP(0.0, 0.01),
                           bounds=(-0.5, 0.5)))
            c.append(
                LParameter(f'cy_{ilc:d}',
                           f'yshift_{ilc:d}',
                           '',
                           NP(0.0, 0.01),
                           bounds=(-0.5, 0.5)))
            c.append(
                LParameter(f'ce_{ilc:d}',
                           f'entropy_{ilc:d}',
                           '',
                           NP(0.0, 0.01),
                           bounds=(-0.5, 0.5)))
        self.ps.add_lightcurve_block('ccoef', 6, self.nlc, c)
        self._sl_ccoef = self.ps.blocks[-1].slice
        self._start_ccoef = self.ps.blocks[-1].start

        if self.n_legendre > 0:
            c = []
            for ilc in range(self.nlc):
                for ilg in range(self.n_legendre):
                    c.append(
                        LParameter(f'leg_{ilc:d}_{ilg:d}',
                                   f'legendre__{ilc:d}_{ilg:d}',
                                   '',
                                   NP(0.0, 0.01),
                                   bounds=(-0.5, 0.5)))
            self.ps.add_lightcurve_block('legendre_polynomials',
                                         self.n_legendre, self.nlc, c)
            self._sl_leg = self.ps.blocks[-1].slice
            self._start_leg = self.ps.blocks[-1].start

        if not self.photometry_frozen:
            c = []
            for ilc in range(self.nlc):
                c.append(
                    LParameter(f'tap_{ilc:d}',
                               f'target_aperture__{ilc:d}',
                               '',
                               UP(0.0, 0.999),
                               bounds=(0.0, 0.999)))
            self.ps.add_lightcurve_block('apertures', 1, self.nlc, c)
            self._sl_tap = self.ps.blocks[-1].slice
            self._start_tap = self.ps.blocks[-1].start

            c = []
            for ilc in range(self.nlc):
                for irf in range(self.cids.shape[1]):
                    c.append(
                        LParameter(f'ref_{irf:d}_{ilc:d}',
                                   f'comparison_star_{irf:d}_{ilc:d}',
                                   '',
                                   UP(0.0, 0.999),
                                   bounds=(0.0, 0.999)))
            self.ps.add_lightcurve_block('rstars', self.cids.shape[1],
                                         self.nlc, c)
            self._sl_ref = self.ps.blocks[-1].slice
            self._start_ref = self.ps.blocks[-1].start

    def _init_p_noise(self):
        """Noise parameter initialisation.
        """
        pns = [
            LParameter('loge_{:d}'.format(i),
                       'log10_error_{:d}'.format(i),
                       '',
                       UP(-3, 0),
                       bounds=(-3, 0)) for i in range(self.n_noise_blocks)
        ]
        self.ps.add_lightcurve_block('log_err', 1, self.n_noise_blocks, pns)
        self._sl_err = self.ps.blocks[-1].slice
        self._start_err = self.ps.blocks[-1].start

    def _init_instrument(self):
        self.instrument = Instrument('MuSCAT2',
                                     [sdss_g, sdss_r, sdss_i, sdss_z])
        self.cm = SMContamination(self.instrument, "i'")

    def create_pv_population(self, npop=50):
        pvp = self.ps.sample_from_prior(npop)
        if self.with_transit:
            #for sl in self.ps.blocks[1].slices:
            #    pvp[:,sl] = uniform(0.01**2, 0.25**2, size=(npop, 1))

            if self.with_contamination:
                p = pvp[:, self._sl_cn]
                p[:, 0] = uniform(size=npop)
                p[:, 1:] = normal(0, 0.2, size=(npop, self.npb - 1))
                p[:, 1:] = clip(p[:, 0][:, newaxis] + p[:, 1:], 0.001,
                                0.999) - p[:, 0][:, newaxis]

            # With LDTk
            # ---------
            #
            # Use LDTk to create the sample if LDTk has been initialised.
            #
            if self.ldps:
                istart = self._start_ld
                cms, ces = self.ldps.coeffs_tq()
                for i, (cm, ce) in enumerate(zip(cms.flat, ces.flat)):
                    pvp[:, i + istart] = normal(cm, ce, size=pvp.shape[0])

            # No LDTk
            # -------
            #
            # Ensure that the total limb darkening decreases towards
            # red passbands.
            #
            else:
                pvv = uniform(size=(npop, 2 * self.npb))
                pvv[:, ::2] = sort(pvv[:, ::2], 1)[:, ::-1]
                pvv[:, 1::2] = sort(pvv[:, 1::2], 1)[:, ::-1]
                pvp[:, self._sl_ld] = pvv
                #for i in range(pvp.shape[0]):
                #    pid = argsort(pvp[i, ldsl][::2])[::-1]
                #    pvp[i, ldsl][::2] = pvp[i, ldsl][::2][pid]
                #    pvp[i, ldsl][1::2] = pvp[i, ldsl][1::2][pid]

        # Estimate white noise from the data
        # ----------------------------------
        for i in range(self.nlc):
            wn = diff(self.ofluxa).std() / sqrt(2)
            pvp[:, self._start_err] = log10(uniform(0.5 * wn, 2 * wn, size=npop))
        return pvp

    def target_apertures(self, pv):
        pv = atleast_2d(pv)
        p = floor(clip(pv[:, self._sl_tap], 0., 0.999) *
                  (self.napt)).astype('int')
        return squeeze(p)

    def reference_apertures(self, pv):
        pv = atleast_2d(pv)
        p = floor(clip(pv[:, self._sl_ref], 0., 0.999) *
                  (self.napt + 1)).astype('int')
        return squeeze(p)

    def set_ofluxa(self, pv):
        self.ofluxa[:] = self.relative_flux(pv)

    def freeze_photometry(self, pv=None):
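        # Fix the aperture and comparison-star choices to the given (or
        # current best) parameter vector, precompute the relative fluxes,
        # and rebuild the parametrisation without the photometry parameters.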
        pv = pv if pv is not None else self.de.minimum_location
        ps_orig = self.ps
        self._original_population = pvp = self.de.population.copy()
        self._frozen_population = delete(
            pvp, s_[self._sl_tap.start:self._sl_ref.stop], 1)
        self.taps = self.target_apertures(pv)
        self.raps = self.reference_apertures(pv)
        self._target_flux = self.target_flux(pv)
        self._reference_flux = self.reference_flux(pv)
        self.ofluxa[:] = self.relative_flux(pv)
        self.photometry_frozen = True
        self._init_parameters()
        if self.with_transit:
            for i in range(self._start_ld):
                self.ps[i].prior = ps_orig[i].prior
                self.ps[i].bounds = ps_orig[i].bounds
        self.de = DiffEvol(self.lnposterior,
                           clip(self.ps.bounds, -1, 1),
                           self.de.n_pop,
                           maximize=True,
                           vectorize=True)
        self.de._population[:, :] = self._frozen_population.copy()
        self.de._fitness[:] = self.lnposterior(self._frozen_population)

    def transit_model(self, pv, copy=True):
        if self.with_transit:
            pv = atleast_2d(pv)
            if self.chromatic_transit:
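                # Evaluate an achromatic transit at the mean area ratio,
                # then rescale the depth passband by passband.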
                mean_ar = pv[:, self._sl_k2].mean(1)
                pvv = zeros((pv.shape[0], pv.shape[1] - self.npb + 1))
                pvv[:, :4] = pv[:, :4]
                pvv[:, 4] = mean_ar
                pvv[:, 5:] = pv[:, 4 + self.npb:]

                pvp = map_pv(pvv)
                ldc = map_ldc(pvv[:, 5:5 + 2 * self.npb])
                flux = self.tm.evaluate_pv(pvp, ldc, copy)
                rel_ar = pv[:, self._sl_k2] / mean_ar[:, newaxis]
                flux = change_depth(rel_ar, flux, self.lcids, self.pbids)
            else:
                flux = super().transit_model(pv, copy)

            if self.with_contamination:
                p = pv[:, self._sl_cn]
                pv_cnt = zeros((pv.shape[0], self.npb))
                pv_cnt[:, 0] = p[:, 0]
                pv_cnt[:, 1:] = p[:, 1:] + p[:, 0][:, newaxis]
                bad = any(pv_cnt < 0.0, 1)
                flux = contaminate(flux, pv_cnt, self.lcids, self.pbids)
                flux[bad, 0] = inf
            return flux
        else:
            return 1.

    def flux_model(self, pv):
        baseline = self.baseline(pv)
        trends = self.trends(pv)
        model_flux = self.transit_model(pv, copy=True)
        return baseline * model_flux + trends

    def relative_flux(self, pv):
        return self.target_flux(pv) / self.reference_flux(pv)

    def target_flux(self, pv):
        pv = atleast_2d(pv)
        p = floor(clip(pv[:, self._sl_tap], 0., 0.999) *
                  self.napt).astype('int')
        off = zeros((p.shape[0], self.timea.size))
        for i, sl in enumerate(self.lcslices):
            off[:, sl] = self.ofluxes[i][:, p[:, i]].T
        return squeeze(off)

    def reference_flux(self, pv):
        pv = atleast_2d(pv)
        p = floor(clip(pv[:, self._sl_ref], 0., 0.999) *
                  (self.napt + 1)).astype('int')
        r = zeros((pv.shape[0], self.ofluxa.size))
        nref = self.cids.shape[1]
        for ipb, sl in enumerate(self.lcslices):
            for i in range(nref):
                r[:, sl] += self.refs[ipb][i][:, p[:, ipb * nref + i]].T
            r[:, sl] = r[:, sl] / median(r[:, sl], 1)[:, newaxis]
        return squeeze(where(isfinite(r), r, inf))

    def extinction(self, pv):
        pv = atleast_2d(pv)
        ext = zeros((pv.shape[0], self.timea.size))
        for i, sl in enumerate(self.lcslices):
            st = self._start_ccoef + i * 6
            ext[:, sl] = exp(-pv[:, st + 2] * self.airmasses[i][:, newaxis]).T
            ext[:, sl] /= mean(ext[:, sl], 1)[:, newaxis]
        return squeeze(ext)

    def baseline(self, pv):
        pv = atleast_2d(pv)
        bl = zeros((pv.shape[0], self.timea.size))
        for i, sl in enumerate(self.lcslices):
            st = self._start_ccoef + i * 6
            p = pv[:, st:st + 6]
            #bl[:, sl] = (self.covariates[i] @ p[:,[0,1,3,4,5]].T).T
            bl[:, sl] = (self.covariates[i][:, [0, 2, 3]] @ p[:, [0, 3, 4]].T).T

        if self.n_legendre > 0:
            for i, sl in enumerate(self.lcslices):
                st = self._start_leg + i * self.n_legendre
                p = pv[:, st:st + self.n_legendre]
                bl[:, sl] += (self.legendre[i] @ p.T).T
        bl = bl * self.extinction(pv)
        return bl

    def lnlikelihood(self, pv):
        flux_m = self.flux_model(pv)
        wn = 10**(atleast_2d(pv)[:, self._sl_err])
        if self.photometry_frozen:
            return lnlike_logistic_v1d(self.ofluxa, flux_m, wn, self.lcids)
        else:
            return lnlike_logistic_v(self.relative_flux(pv), flux_m, wn,
                                     self.lcids)

    def ldprior(self, pv):
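        # Require q1 and q2 to be non-increasing from blue to red passbands;
        # parameter vectors violating this get a log-prior of -inf.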
        ld = pv[:, self._sl_ld]
        #lds = ld[:,::2] + ld[:, 1::2]
        #return where(any(diff(lds, 1) > 0., 1), -inf, 0.)
        return where(
            any(diff(ld[:, ::2], 1) > 0., 1)
            | any(diff(ld[:, 1::2], 1) > 0., 1), -inf, 0)

    def inside_obs_prior(self, pv):
        return where(transit_inside_obs(pv, self._tmin, self._tmax, 20.), 0.,
                     -inf)

    def lnprior(self, pv):
        pv = atleast_2d(pv)
        lnp = self.ps.lnprior(pv)
        if self.with_transit:
            lnp += self.additional_priors(pv) + self.ldprior(pv)
            #lnp += self.ldprior(pv) + self.inside_obs_prior(pv) + self.additional_priors(pv)
        return lnp

    def add_t14_prior(self, mean: float, std: float) -> None:
        """Add a normal prior on the transit duration.

        Parameters
        ----------
        mean: float
            Mean of the normal distribution.
        std: float
            Standard deviation of the normal distribution.
        """
        def T14(pv):
            pv = atleast_2d(pv)
            a = as_from_rhop(pv[:, 2], pv[:, 1])
            t14 = duration_eccentric(pv[:, 1], sqrt(pv[:, 4]), a,
                                     arccos(pv[:, 3] / a), 0, 0, 1)
            return norm.logpdf(t14, mean, std)

        self.lnpriors.append(T14)

    def add_ldtk_prior(
        self,
        teff: tuple,
        logg: tuple,
        z: tuple,
        uncertainty_multiplier: float = 3,
        pbs: tuple = ('g', 'r', 'i', 'z')
    ) -> None:
        """Add a LDTk-based prior on the limb darkening.

        Parameters
        ----------
        teff: tuple
            Host star effective temperature as a (value, uncertainty) pair [K].
        logg: tuple
            Host star log10 surface gravity (cgs) as a (value, uncertainty) pair.
        z: tuple
            Host star metallicity as a (value, uncertainty) pair.
        uncertainty_multiplier: float
            Factor used to inflate the LDTk-estimated coefficient uncertainties.
        pbs: tuple
            Passband names, a subset of ('g', 'r', 'i', 'z').
        """
        fs = {
            n: f
            for n, f in zip('g r i z'.split(), (sdss_g, sdss_r, sdss_i,
                                                sdss_z))
        }
        filters = [fs[k] for k in pbs]
        self.ldsc = LDPSetCreator(teff, logg, z, filters)
        self.ldps = self.ldsc.create_profiles(1000)
        self.ldps.resample_linear_z()
        self.ldps.set_uncertainty_multiplier(uncertainty_multiplier)

        def ldprior(pv):
            pv = atleast_2d(pv)
            lnl = zeros(pv.shape[0])
            for i in range(pv.shape[0]):
                lnl[i] = self.ldps.lnlike_tq(pv[i, self._sl_ld])
            return lnl

        self.lnpriors.append(ldprior)

    def plot_light_curves(self,
                          model: str = 'de',
                          figsize: tuple = (13, 8),
                          fig=None,
                          gridspec=None):
        if fig is None:
            fig = figure(figsize=figsize, constrained_layout=True)

        gs = dict(height_ratios=(0.5, 2, 2, 1))
        if gridspec:
            gs.update(gridspec)

        axs = fig.subplots(4, self.nlc, sharey='row', gridspec_kw=gs)

        if model == 'de':
            pv = self.de.minimum_location
            err = 10**pv[self._sl_err]
            if not self.photometry_frozen:
                self.set_ofluxa(pv)

        elif model == 'mc':
            fc = array(self.posterior_samples(include_ldc=True))
            pv = permutation(fc)[:300]
            err = 10**median(pv[:, self._sl_err], 0)
            if not self.photometry_frozen:
                self.set_ofluxa(median(pv, 0))
        else:
            raise NotImplementedError(
                "Light curve plotting `model` needs to be either `de` or `mc`")

        ps = [50, 16, 84]
        if self.with_transit:
            tm = percentile(atleast_2d(self.transit_model(pv)), ps, 0)
        else:
            tm = percentile(atleast_2d(ones(self.timea.size)), ps, 0)
        fm = percentile(atleast_2d(self.flux_model(pv)), ps, 0)
        bl = percentile(atleast_2d(self.baseline(pv)), ps, 0)
        t0 = self.t0

        for i, sl in enumerate(self.lcslices):
            t = self.timea[sl]
            axs[1, i].plot(t, self.ofluxa[sl], '.', alpha=0.5)
            axs[1, i].plot(t, fm[0][sl], 'k', lw=2)
            axs[2, i].plot(t, self.ofluxa[sl] / bl[0][sl], '.', alpha=0.5)
            if model == 'mc':
                axs[2, i].fill_between(t,
                                       tm[1][sl],
                                       tm[2][sl],
                                       facecolor='darkblue',
                                       alpha=0.25)
            axs[2, i].plot(t, tm[0][sl], 'k', lw=2)
            axs[3, i].plot(t, self.ofluxa[sl] - fm[0][sl], '.', alpha=0.5)

            res = self.ofluxa[sl] - fm[0][sl]
            x = linspace(-4 * err[i], 4 * err[i])
            axs[0, i].hist(1e3 * res, 'auto', density=True, alpha=0.5)
            axs[0, i].plot(1e3 * x,
                           logistic(0, 1e3 * err[i]).pdf(1e3 * x), 'k')
            axs[0, i].text(0.05, 0.95,
                           f"$\sigma$ = {(1e3 * err[i] * pi / sqrt(3)):5.2f} ppt",
                           transform=axs[0, i].transAxes, va='top')

        [
            ax.set_title(f"MuSCAT2 {t}", size='large')
            for ax, t in zip(axs[0], self.passbands)
        ]
        [setp(ax.get_xticklabels(), visible=False) for ax in axs[1:3].flat]
        setp(axs[1, 0], ylabel='Transit + Systematics')
        setp(axs[2, 0], ylabel='Transit - Systematics')
        setp(axs[3, 0], ylabel='Residuals')
        setp(axs[3, :], xlabel=f'Time - {self.t0:9.0f} [BJD]')
        setp(axs[0, :], xlabel='Residual [ppt]', yticks=[])
        [sb.despine(ax=ax, offset=5, left=True) for ax in axs[0]]
        return fig, axs

    def plot_posteriors(self,
                        figsize: tuple = (13, 5),
                        fig=None,
                        gridspec=None):
        if fig is None:
            fig = figure(figsize=figsize, constrained_layout=True)
        axs = fig.subplots(2, 3, gridspec_kw=gridspec)

        df = self.posterior_samples(include_ldc=True)
        df = df.iloc[:, :5].copy()
        df['k'] = sqrt(df.k2)

        names = 'stellar density, impact parameter, transit depth, radius ratio'.split(
            ', ')

        # Transit centre
        # --------------
        p = self.ps.priors[0]
        t0 = floor(df.tc.mean())
        trange = p.mean - t0 - 3 * p.std, p.mean - t0 + 3 * p.std
        x = linspace(*trange)
        axs[0, 0].hist(df.tc - t0,
                       50,
                       density=True,
                       range=trange,
                       alpha=0.5,
                       edgecolor='k',
                       histtype='stepfilled')
        axs[0, 0].set_xlabel(f'Transit center - {t0:9.0f} [BJD]')
        axs[0, 0].fill_between(x,
                               exp(p.logpdf(x + t0)),
                               alpha=0.5,
                               edgecolor='k')

        # Period
        # ------
        p = self.ps.priors[1]
        trange = p.mean - 3 * p.std, p.mean + 3 * p.std
        x = linspace(*trange)
        axs[0, 1].hist(df.pr,
                       50,
                       density=True,
                       range=trange,
                       alpha=0.5,
                       edgecolor='k',
                       histtype='stepfilled')
        axs[0, 1].set_xlabel('Period [days]')
        axs[0, 1].fill_between(x, exp(p.logpdf(x)), alpha=0.5, edgecolor='k')
        setp(axs[0, 1], xticks=df.pr.quantile([0.05, 0.5, 0.95]))

        # Rest without priors
        # -------------------
        for i, ax in enumerate(axs.flat[2:]):
            ax.hist(df.iloc[:, i + 2],
                    50,
                    density=True,
                    alpha=0.5,
                    edgecolor='k',
                    histtype='stepfilled')
            ax.set_xlabel(names[i])

        # TFOP Transit depth estimates
        axs[1, 1].axvline(1e-6 * self.toi.depth[0], c='0.5', lw=2)
        axs[1, 2].axvline(sqrt(1e-6 * self.toi.depth[0]), c='0.5', lw=2)

        setp(axs, yticks=[])
        return fig, axs

    def plot_chains(self, pids=(0, 1, 2, 3, 4)):
        fig, axs = subplots(len(pids),
                            1,
                            figsize=(13, 10),
                            constrained_layout=True,
                            sharex='all')
        x = arange(self.sampler.chain.shape[1])
        for i, (pid, ax) in enumerate(zip(pids, axs)):
            pes = percentile(self.sampler.chain[:, :, pid],
                             [50, 16, 84, 0.5, 99.5], 0)
            ax.fill_between(x, pes[3], pes[4])
            ax.fill_between(x, pes[1], pes[2])
            ax.plot(pes[0], 'k')
            setp(ax, ylabel=self.ps.names[pid])
        setp(axs, xlim=(0, x[-1]))

    def plot_running_mean(self,
                          figsize=(13, 5),
                          errors=True,
                          combine=False,
                          remove_baseline=True,
                          ylim=None,
                          npt=100,
                          width_min=10):
        pv = self.de.minimum_location
        rflux = self.relative_flux(pv)
        if remove_baseline:
            rflux /= squeeze(self.baseline(pv))

        if combine:
            bt, bf, be = running_mean(self.timea, rflux, npt, width_min)
            fig, axs = subplots(figsize=figsize, constrained_layout=True)
            if errors:
                axs.errorbar(bt, bf, be, drawstyle='steps-mid', c='k')
            else:
                axs.plot(bt, bf, drawstyle='steps-mid', c='k')
            axs.fill_between(bt,
                             bf - 3 * be,
                             bf + 3 * be,
                             alpha=0.2,
                             step='mid')

        else:
            rfluxes = [rflux[sl] for sl in self.lcslices]
            fig, axs = subplots(1,
                                self.nlc,
                                figsize=figsize,
                                constrained_layout=True,
                                sharey='all')
            for i, ax in enumerate(axs):
                bt, bf, be = running_mean(self.times[i], rfluxes[i], npt,
                                          width_min)
                if errors:
                    ax.errorbar(bt, bf, be, drawstyle='steps-mid', c='k')
                else:
                    ax.plot(bt, bf, drawstyle='steps-mid', c='k')

        if ylim:
            setp(axs, ylim=ylim)
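
The aperture and comparison-star parameters above work by mapping a continuous value on [0, 1) to a discrete aperture index, which lets the differential-evolution optimiser search over apertures together with the physical parameters. A standalone sketch of that mapping follows; the aperture count is hypothetical.

import numpy as np

def aperture_index(u, napt):
    """Map a unit-interval parameter to an integer index in [0, napt - 1]."""
    return int(np.floor(np.clip(u, 0.0, 0.999) * napt))

napt = 10                                           # hypothetical number of apertures
for u in (0.0, 0.25, 0.5, 0.999):
    print(f'{u:5.3f} -> aperture {aperture_index(u, napt)}')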
Example #17
class LDTKHandler:
    '''
    The LDTKHandler provides an easy way to interface ldtk with TransitFit.

    Parameters
    ----------
    host_T : tuple
        The effective temperature of the host star, in Kelvin, given as a
        (value, uncertainty) pair.
    host_logg : tuple
        The log_10 of the surface gravity of the host star, with gravity
        measured in cm/s2. Should be given as a (value, uncertainty) pair.
    host_z : tuple
        The metalicity of the host, given as a (value, uncertainty) pair.
    filters : array_like
        The set of filters, given in [low, high] limits for the wavelengths
        with the wavelengths given in nanometers if a uniform filter is to
        be used, or [[wavelength...], [transmission]] if a fully-defined
        profile is being used. The ordering of the filters should
        correspond to the filter_idx parameter used elsewhere.
    ld_model : str, optional
        The model of limb darkening to use. Allowed values are 'linear',
        'quadratic', 'squareroot', 'power2', and 'nonlinear'. Default is
        'quadratic'.
    n_samples : int, optional
        The number of limb darkening profiles to create. Passed to
        ldtk.LDPSetCreator.create_profiles(). Default is 20000.
    do_mc : bool, optional
        If True, will use MCMC to estimate coefficient uncertainties more
        accurately. Default is False.
    cache_path : str, optional
        This is the path to cache LDTK files to. If not specified, will
        default to the LDTK default
    '''
    def __init__(self,
                 host_T,
                 host_logg,
                 host_z,
                 filters,
                 ld_model='quadratic',
                 n_samples=20000,
                 do_mc=False,
                 cache_path=None):

        # Sanity checks
        if ld_model not in _implemented_ld_models:
            raise ValueError('Unrecognised ld_model {}'.format(ld_model))

        self.default_model = ld_model

        # Set up the filters
        #print('Setting up filters')
        ldtk_filters = []
        for i, f in enumerate(filters):
            if isinstance(f[0], Iterable):
                # We have been passed a full filter profile, set up
                # TabulatedFilter
                # Work out if the profile is in percent or fraction - is
                # anything bigger than 1?
                if np.any(f[1] > 1):
                    tmf = 1e-2
                else:
                    tmf = 1
                ldtk_filters.append(TabulatedFilter(i, f[0], f[1], tmf))
            else:
                ldtk_filters.append(BoxcarFilter(i, f[0], f[1]))

        # Make the set creator, downloading data files if required
        if cache_path is not None:
            os.makedirs(cache_path, exist_ok=True)
        #print('Making LD parameter set creator.')
        #print('This may take some time as we may need to download files...')
        self.set_creator = LDPSetCreator(teff=host_T,
                                         logg=host_logg,
                                         z=host_z,
                                         filters=ldtk_filters,
                                         cache=cache_path)

        # Get the LD profiles from the set creator
        #print('Obtaining LD profiles')
        self.profile_set = self.set_creator.create_profiles(nsamples=n_samples)

        # Find the 'best values' for each filter and then find the ratios
        # compared to the first.
        #print('Finding coefficients and ratios')
        self.coeffs = {}
        self.ratios = {}

        self._power2_available = True

        for model in _implemented_ld_models:
            try:
                self.coeffs[model] = self._extract_best_coeffs(model)
                self.ratios[model] = (self.coeffs[model][0]
                                      / self.coeffs[model][0][0])
            except Exception as e:
                print(e)
                print(f'Unable to initialise {model} model')
                # only flag power2 as unavailable if power2 itself failed
                if model == 'power2':
                    self._power2_available = False

    def estimate_values(self, ld0_values, ld_model):
        '''
        If given a set of LD param values for filter 0, will estimate the LD
        parameters for all filters based on the ratios between the best values
        found in initialisation.

        Parameters
        ----------
        ld0_values : float or array_like
            The LD parameters for filter 0
        ld_model : str
            The limb darkening model to use

        Returns
        -------
        all_ld_values : array_like, shape (n_filters, n_coeffs)
            The estimated limb darkening parameters
        '''
        if ld_model == 'power2' and not self._power2_available:
            raise ValueError(
                'power2 model is not available. If you want to use this, please use the development version of ldtk available on https://github.com/hpparvi/ldtk, rather than the pypi version.'
            )

        return ld0_values * self.ratios[ld_model]

    def _extract_best_coeffs(self, ld_model):
        '''
        Extracts the best values for a limb darkening model for the filters

        Parameters
        ----------
        ld_model : str
            The limb darkening model to obtain the values for.

        Returns
        -------
        coeffs : array_like, shape (n_filters, n_coeffs)
            The coefficients for each filter
        err : array_like, shape (n_filters, n_coeffs)
            The uncertainty on each of the coefficients
        '''
        if ld_model == 'linear':
            coeff, err = self.profile_set.coeffs_ln()
        elif ld_model == 'quadratic':
            coeff, err = self.profile_set.coeffs_qd()
        elif ld_model == 'nonlinear':
            coeff, err = self.profile_set.coeffs_nl()
        elif ld_model == 'power2':
            if not self._power2_available:
                raise ValueError(
                    'power2 model is not available. If you want to use this, please use the development version of ldtk available on https://github.com/hpparvi/ldtk, rather than the pypi version.'
                )
            coeff, err = self.profile_set.coeffs_p2()
        elif ld_model == 'squareroot':
            coeff, err = self.profile_set.coeffs_sq()

        else:
            raise ValueError('Unrecognised ld_model {}'.format(ld_model))

        return coeff, err

    def lnlike(self, coeffs, ld_model):
        '''
        Evaluates the log likelihood for a set of coefficients

        Parameters
        ----------
        coeffs : array_like, shape (n_filters, n_coeffs)
            The coefficients to evaluate the log likelihood for.
        ld_model : str
            The limb darkening model to use.

        Returns
        -------
        lnlike : float
            The log likelihood of the coefficients.
        '''
        if ld_model == 'linear':
            return self.profile_set.lnlike_ln(coeffs)
        if ld_model == 'quadratic':
            return self.profile_set.lnlike_qd(coeffs)
        if ld_model == 'nonlinear':
            return self.profile_set.lnlike_nl(coeffs)
        if ld_model == 'power2':
            if not self._power2_available:
                raise ValueError(
                    'power2 model is not available. If you want to use this, please use the development version of ldtk available on https://github.com/hpparvi/ldtk, rather than the pypi version.'
                )
            return self.profile_set.lnlike_p2(coeffs)
        if ld_model == 'squareroot':
            return self.profile_set.lnlike_sq(coeffs)

        raise ValueError('Unrecognised ld_model {}'.format(ld_model))
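A minimal usage sketch for LDTKHandler (the host parameters, filter limits,
and sample count below are illustrative placeholders, not values from any
real target):

# Hypothetical host: a Sun-like star observed through two uniform
# (boxcar) filters given as [low, high] wavelength limits in nm.
handler = LDTKHandler(host_T=(5700, 100),
                      host_logg=(4.4, 0.1),
                      host_z=(0.0, 0.1),
                      filters=[[400, 500], [500, 600]],
                      ld_model='quadratic',
                      n_samples=1000)

# Best-fit quadratic coefficients and their uncertainties, per filter
qd_coeffs, qd_errs = handler.coeffs['quadratic']

# Scale the filter-0 coefficients to estimate the remaining filters
estimated = handler.estimate_values(qd_coeffs[0], 'quadratic')

# Log likelihood of a full (n_filters, n_coeffs) coefficient set
lnl = handler.lnlike(qd_coeffs, 'quadratic')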
Example #18
def ldc_calc(star=None, filt=None, teff=None, logg=None, metal=None,
             binstart=500, binend=550, model='non-linear', compare=False,
             rescaled=True, plt=True):
    import numpy as np
    from scipy.interpolate import interp1d
    from scipy.optimize import curve_fit
    import ldtk
    from ldtk import LDPSetCreator, BoxcarFilter, TabulatedFilter
    # the plotting calls below assume a pyplot-style namespace
    from matplotlib.pyplot import figure, plot, legend, xlabel, ylabel, title
    #Import filters
    if filt == 'g430l':
        fil = np.loadtxt('/Users/jlothrin/Desktop/G430L.csv',delimiter=',',skiprows=1)
    if filt == 'g750l':
        fil = np.loadtxt('/Users/jlothrin/Desktop/G750L.csv',delimiter=',',skiprows=1)
    if filt == 'wfc3141':
        fil = np.loadtxt('/Users/jlothrin/Desktop/wfc3141.csv',delimiter=',',skiprows=1)
    if filt == 'IRAC_4_5':
        fil = np.loadtxt('/Users/jlothrin/Desktop/IRAC_4_5.csv',delimiter=',',skiprows=1)
    waves = fil[:,0]
    if filt[0] == 'g':
        waves = fil[:, 0] / 10.  # convert Angstroms to nm
    if filt == 'IRAC_4_5':
        waves = fil[:, 0] * 1000.  # convert microns to nm
    throughs = fil[:,1]
    f = interp1d(waves,throughs)
    waves_hires = np.linspace(min(waves),max(waves),500,endpoint=True)
    throughs_hires = f(waves_hires)
    
    
    #EVERYTHING MUST BE IN NM
    lower_wave = binstart
    upper_wave = binend
    
    w = [wav for wav in waves_hires if lower_wave < wav < upper_wave]
    t = [thr for wav, thr in zip(waves_hires, throughs_hires)
         if lower_wave < wav < upper_wave]
        
    #filters = [BoxcarFilter('a',450,550), BoxcarFilter('b',650,750), BoxcarFilter('c',850,950)]
    #filters = [BoxcarFilter('a',650,750)]
    filters = [ldtk.TabulatedFilter('stis',w,t)]
    
    if star == '55cnc':
        sc = LDPSetCreator([5250, 100],[4.50, 0.10],[0.25, 0.05],filters)
        teff = 5250
        logg = 4.50
        metal = 0.25
    #sc = LDPSetCreator([6443,75],[4.76,0.09],[-0.08,0.05],filters)
    if star == 'wasp31':
        sc = LDPSetCreator([6250,50],[4.5,0.09],[-0.2,0.05],filters)
        teff = 6250
        logg = 4.5
        metal = -0.2
    if star == 'gj436':
        sc = LDPSetCreator([3416,100],[4.843,0.018],[0.02,0.2],filters)
        teff = 3416
        logg = 4.843
        metal = 0.02
    if star == 'gj3470':
        sc = LDPSetCreator([3652,50],[4.78,0.12],[0.18,0.08],filters)
        teff = 3652
        logg = 4.78
        metal = 0.18
    if star == 'hd97658':
        sc = LDPSetCreator([5217,33],[4.583,0.054],[-0.26,0.03],filters)
        teff = 5217
        logg = 4.583
        metal = -0.26
    if star == 'k218':
        sc = LDPSetCreator([3503,60],[4.863,0.13],[0.09,0.09],filters)
        teff = 3503
        logg = 4.863
        metal = 0.09
    if star == 'k138':
        sc = LDPSetCreator([3841,50],[4.886,0.01],[-0.280,0.1],filters)
        teff = 3841
        logg = 4.886
        metal = -0.280
    if star == 'wasp17':
        sc = LDPSetCreator([6666,30],[4.26,0.06],[-0.04,0.03],filters)
        teff = 6666
        logg = 4.26
        metal = -0.04
    if star is None:
        sc = LDPSetCreator([teff,50],[logg,0.1],[metal,0.1],filters)
                        
                      
    ps = sc.create_profiles(nsamples=100)
     
    qc,qe = ps.coeffs_qd(do_mc=False)
    nlc,nle = ps.coeffs_nl(do_mc=False)
    lnc,lne = ps.coeffs_ln(do_mc=False)
    print(nlc)
    print(nle)
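    # nl_func below implements the standard four-parameter non-linear
    # limb-darkening law (Claret 2000),
    #   I(mu)/I(1) = 1 - sum_{k=1..4} c_k * (1 - mu**(k/2)),
    # written out term by term so curve_fit can fit c1..c4 directly.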

    def nl_func(x,y1,y2,y3,y4):
        out = 1.-(y1*(1.-(x**(1./2.)))+y2*(1.-(x**(2./2.)))+y3*(1.-(x**(3./2.)))+y4*(1.-(x**(4./2.))))
        params = [y1,y2,y3,y4]
        bad = [1 for i in range(0,len(params)) if abs(params[i]) > 1.]
#        if np.sum(bad) > 0:
#            out = out+1000.
        return out
        
    fit = curve_fit(nl_func, ps._mu, ps.profile_averages[0], [0.2, 0.9, -0.2, -0.03])
    
    if plt is True:
        figure()
        plot(ps._mu,ps.profile_averages[0],label='Model Intensity')
        plot(ps._mu,ps.profile_averages[0],'bo',label='Sampled Mu')
        #plot(ps._mu,1-qc[0,0]*(1-ps._mu)-qc[0,1]*(1-ps._mu)**2,'g--',label = 'Quad Fit')
        #plot(ps._mu,1-nlc[0,0]*(1-((ps._mu**(1./2.)))+nlc[0,1]*(1-ps._mu)+nlc[0,2]*(1-(ps._mu**(3./2.)))+nlc[0,3]*(1-(ps._mu**(2.)))),'r',label = 'Non-Linear Fit')
        plot(ps._mu,1-lnc[0,0]*(1-ps._mu),'y',label='Linear Fit')
        plot(ps._mu,nl_func(ps._mu,fit[0][0],fit[0][1],fit[0][2],fit[0][3]),'g',label = 'My non-linear fit')
        plot(ps._mu,nl_func(ps._mu,nlc[0][0],nlc[0][1],nlc[0][2],nlc[0][3]),'--k',label='Non-Linear Fit')

        
        #sing 450-460
        #plot(ps._mu,nl_func(ps._mu,0.1778,1.1859,-0.7235,0.1887),'k',label='Sing+2013')    
        #sing 460-470
        #plot(ps._mu,nl_func(ps._mu,0.2136,0.8688,-0.1927,-0.0317),'k',label='Sing+2013')
    
    
    new_mu = [(x-min(ps._mu))/(max(ps._mu)-min(ps._mu)) for x in ps._mu]
    
    refit = curve_fit(nl_func, new_mu, ps.profile_averages[0], [0.2, 0.7, -0.2, -0.03])
    if plt is True:    
        plot(new_mu,ps.profile_averages[0],label='Rescaled Model Intensity')
        plot(new_mu,nl_func(np.asarray(new_mu),refit[0][0],refit[0][1],refit[0][2],refit[0][3]),'b',label = 'My rescaled non-linear fit')
    
    #my model for 55 cancri
    if compare is True:
        good_mu_old = [0.1313,0.19335,0.2554,0.31745,0.3795,0.44155,0.5036,0.56565,0.6277,0.68975,0.7518,0.8135,0.876,0.93795,1.0]
        one_sum_norm_old = [0.37541731740693157, 0.46238495959398135, 0.52611395484286205,
                            0.57920999647415661, 0.62658004547351620, 0.67049257831603259,
                            0.71208564917444228, 0.75195835479518414, 0.79045298237117534,
                            0.82776743852276846, 0.86401545614819741, 0.89927622441607569,
                            0.93360250587690652, 0.96703568174637822, 1.0]
        good_mu = [0.1313, 0.19335, 0.2554, 0.31745, 0.3795, 0.44155, 0.5036, 0.56565,
                   0.6277, 0.68975, 0.7518, 0.8135, 0.876, 0.93795, 1.0]
        one_sum_norm = [0.38759735798258849, 0.46854567224818039, 0.52910322497992557,
                        0.58049544322307434, 0.62695066358655005, 0.67038264597838637,
                        0.71174173136349628, 0.75153440209321387, 0.79003806657404141,
                        0.82741678251146478, 0.86376146640146356, 0.89913775107728544,
                        0.93359035990132300, 0.96715566457928104, 1.0]
        
        if plt is True:
            plot(good_mu,one_sum_norm,'r--',label='Josh model')
            plot(good_mu,one_sum_norm,'ro',label='Josh Sampled Mu')
        
            plot(good_mu_old,one_sum_norm_old,'k--',label='Josh model old')
            plot(good_mu_old,one_sum_norm_old,'ko',label='Josh Sampled Mu old')
        
    #ylim([0.,1.0])
    #legend(loc='lower right')
    #ylabel('Normalized Intensity')
    #xlabel('Mu')
    #title('LDTK - Teff = 5250 logg = 4.33, z = 0.25 - 650-750 nm')
    #text(0.4,0.4,'Q1 = '+str(qc[0,0]))
    #text(0.4,0.36,'Q2 = '+str(qc[0,1]))
    if plt is True:
        legend(loc='lower right')
        ylabel('Normalized Intensity')
        xlabel('Mu')
        title('LDTK - Teff = '+str(teff)+', logg = '+str(logg)+', z = '+str(metal)+' - STIS - '+str(lower_wave)+'-'+str(upper_wave)+' nm')
    
    if rescaled:
        out = refit[0]
    else:
        out = fit[0]
       
    #pdb.set_trace()
        
    return out, nle
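
A hypothetical call (it assumes the filter CSV files hard-coded above exist
at those paths); this returns the fitted non-linear coefficients, rescaled
in mu by default, together with the LDTk non-linear coefficient errors:

coeffs, nl_errors = ldc_calc(star='wasp17', filt='g430l',
                             binstart=500, binend=550, plt=False)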
Example #19
class WithTLCs(TransmissionSpectrum):
    """Transmission spectrum object that also contains the light curves (needed for initial fits, not for later plotting.)"""
    def __init__(self, cube, maskname='defaultMask', **kwargs):
        TransmissionSpectrum.__init__(self, cube, **kwargs)

        # load the light curves associate with this transmission spectrum
        self.constructBins()
        self.speak('its name is {0}'.format(self))

        # manage the mask, and create a default dummy mask if need be
        self.maskname = maskname
        self.applyMask()

    def constructBins(self):
        '''Populate the transmission spectrum bins with light curves.'''
        self.speak("constructing the bins of {0}".format(self))

        # what files exist?
        possibleTLCs = glob.glob(self.rawlightcurvedirectory + '*.lightcurve')
        self.speak('there are {0} .lightcurve files in {1}'.format(
            len(possibleTLCs), self.rawlightcurvedirectory))

        # do we have to make the light curve files from the spectroscopic cube?
        if len(possibleTLCs) == 0:
            self.speak("creating .lightcurve(s) from the spectroscopic cube")

            # bin and save the light curves for this binsize
            self.cube.makeLCs()

            # return the list of light curve filenames now
            possibleTLCs = glob.glob(self.rawlightcurvedirectory +
                                     '*.lightcurve')

        # make sure at least one TLC exists
        assert (len(possibleTLCs) > 0)

        # pull out the ????to???? strings from the .lightcurve filenames
        chunks = []
        for file in possibleTLCs:
            chunks.append(
                (file.split('/')[-1].split('.lightcurve')[0]).split('_')[-1])

        # populate bins (and their central wavelengths)
        bins = []
        wavelengths = []
        for trimmed in chunks:
            # figure out the boundaries of this bin
            left = int(trimmed.split('to')[0])
            right = int(trimmed.split('to')[-1])
            # create a wavelength bin (but don't load its lightcurve yet)
            bins.append(WavelengthBin(self, left=left, right=right))
            wavelengths.append(bins[-1].wavelength)

        # assign the bins to this spectrum, sorted by wavelength
        self.bins = np.array(bins)[np.argsort(wavelengths)]
        self.wavelengths = np.array(wavelengths)[np.argsort(wavelengths)]

        # loop over the bins, and load the light curve for each
        self.loadLCs()

        # read the first bin (necessary for setting up initial conditions for fits)
        self.speak(
            "spectrum contains {0} bins covering {1} to {2}{3} at {4}{3} resolution"
            .format(len(self.bins), self.bins[0].left / self.unit,
                    self.bins[-1].right / self.unit, self.unitstring,
                    self.binsize / self.unit))

    def loadLCs(self):
        '''Loop over bins, load their light curves.'''
        self.speak('loading all the light curves for all the bins')
        for b in self.bins:
            b.readTLC()

    @property
    def nBins(self):
        """Return the number of bins in this spectrum."""
        try:
            return self._nBins
        except AttributeError:
            self._nBins = len(self.bins)
            return self._nBins

    @property
    def nTimes(self):
        """Return the *maximum* number of times among any of the lightcurves associated with this spectrum."""
        try:
            return self._nTimes
        except AttributeError:
            # figure out the maximum number of times there are
            self._nTimes = 0
            for i in range(self.nBins):
                b = self.bins[i]
                try:
                    b.tlc.flux
                except AttributeError:
                    b.readTLC()
                l = len(b.tlc.flux)
                # update the maximum nTimes, and the most densely populated times (necessary for interpolation)
                if l > self._nTimes:
                    self._nTimes = l
            return self._nTimes

    def toArray(self, key):
        '''Create an image array of a TLC variable.'''

        # create an empty array
        a = np.zeros((self.nBins, self.nTimes))

        # we can't guarantee the tlcs all have the exact same timestamps
        #  because some data points might have been thrown out, so we have
        #  to do the interpolation at least once (we can store interpolation
        #  indices inside the bin object to speed this in the future)

        # loop over the bins
        for i in range(self.nBins):

            # pick out this bin's TLC
            tlc = self.bins[i].tlc

            try:
                a[i, :] = tlc.__dict__[key]
            except KeyError:
                a[i, :] = tlc.externalvariables[key]
        return a

    #def interpolationindices(self, tlc):
    #		# define the interpolation indices for this TLC
    #		try:
    #			return tlc.interpolationindices
    #		except:
    #			# interpolate from times to indices
    #			interpolation = scipy.interpolate.interp1d(self.themostdenselypopulatedtimes, np.arange(self.nTimes), bounds_error=True, kind='nearest')
    #			tlc.interpolationindices = interpolation(tlc.bjd).astype(np.int)
    #			return tlc.interpolationindices

    def createMask(self, empty=False, afterfastfit=False):

        if empty:
            maskname = 'defaultMask'
            mask = np.array(self.toArray('bad')).astype(np.byte)
        elif afterfastfit:
            maskname = 'trimOutliers'
            mask = np.array(self.toArray('bad')).astype(np.byte)
        else:
            a = input("What would you like to call this mask?\n ")
            maskname = a.replace(" ", "")

            keys = ['peak_', 'sky_', 'width_', 'centroid_']
            # one column for light curves, one for target, one for comparison
            nColumns = 3
            nRows = len(keys)
            plt.figure('masking')
            ip = craftroom.displays.iplot.iplot(nRows, nColumns)

            kw = dict(cmap='gray',
                      interpolation='nearest',
                      aspect='auto',
                      vmin=None,
                      vmax=None)

            mask = np.array(self.toArray('bad')).astype(np.byte)
            nBins = len(self.bins)

            threshold = 4
            # identify outliers
            try:
                for i in range(nBins):
                    residuals = self.bins[i].tlc.residuals()
                    noise = np.std(residuals)
                    bad = np.abs(residuals) > threshold * noise
                    self.speak(
                        " in bin {0}, {1} points exceeded {2} x {3}".format(
                            i, np.sum(bad), threshold, noise))
                    mask[i, :] = mask[
                        i, :] | bad * self.bins[0].tlc.flags['outlier']
            except:
                pass

            # identify "saturated"
            #saturationthreshold = 1.8e6
            #saturated =  (self.toArray('peak_target') > saturationthreshold) |  (self.toArray('peak_target') > saturationthreshold)
            #print "  {0} total points were over {1} in their peak counts".format(np.sum(saturated), saturationthreshold)
            #mask = mask | saturated*self.bins[0].tlc.flags['saturation']

            # set up the axes (this can be slow, so do it once)
            ax = ip.subplot(0, 0, name='flux')
            sub = {}
            sub['sharex'], sub['sharey'] = ip.axes['flux'], ip.axes['flux']
            ax = ip.subplot(1, 0, name='instrumental', **sub)
            ax = ip.subplot(2, 0, name='corrected', **sub)
            ax = ip.subplot(3, 0, name='residuals', **sub)
            for i in range(len(keys)):
                key = keys[i] + 'target'
                ax = ip.subplot(i, 1, name=key, **sub)
                key = keys[i] + 'star01'
                ax = ip.subplot(i, 2, name=key, **sub)

            keepgoing = True
            while keepgoing:
                # clear the axes again
                for k in ip.axes.keys():
                    ip.axes[k].cla()

                # plot the initial light curve
                flux = self.toArray('flux')
                kw['vmin'], kw['vmax'] = 0.98, 1.02
                ip.axes['flux'].imshow(flux, **kw)
                ip.axes['flux'].set_title('flux')

                # plot the instrumental correction
                instrumental = np.zeros_like(flux)
                residuals = np.zeros_like(flux)
                try:
                    for i in range(nBins):
                        instrumental[i, :] = self.bins[i].tm.instrument_model()
                        residuals[i, :] = self.bins[i].tlc.residuals()
                except:
                    pass

                ip.axes['instrumental'].imshow(instrumental, **kw)
                ip.axes['instrumental'].set_title('instrumental')

                ip.axes['corrected'].imshow(flux / instrumental, **kw)
                ip.axes['corrected'].set_title('corrected')

                kw['vmin'], kw['vmax'] = 0.98 - 1, 1.02 - 1
                ip.axes['residuals'].imshow(residuals, **kw)
                ip.axes['residuals'].set_title('residuals')

                # plot target diagnostics
                for i in range(len(keys)):
                    key = keys[i] + 'target'
                    ax = ip.axes[key]
                    ax.set_title(key)
                    array = self.toArray(key)
                    kw['vmin'], kw['vmax'] = None, None
                    ax.imshow(array, **kw)

                # plot comparison diagnostics
                for i in range(len(keys)):
                    key = keys[i] + 'star01'
                    ax = ip.axes[key]
                    ax.set_title(key)
                    array = self.toArray(key)
                    kw['vmin'], kw['vmax'] = None, None
                    ax.imshow(array, **kw)

                # zoom out a tiny bit to make selection easier
                for k in ip.axes.keys():
                    y, x = self.toArray('flux').shape
                    ip.axes[k].set_xlim(0 - x / 20, x + x / 20)
                    ip.axes[k].set_ylim(0 - y / 20, y + y / 20)

                masked = np.ma.masked_where(mask == 0, mask)
                my_cmap = copy.copy(plt.cm.get_cmap(
                    'autumn'))  # get a copy of the autumn color map
                my_cmap.set_bad(alpha=0)
                for k in ip.axes.keys():
                    ax = ip.axes[k]
                    ax.imshow(masked,
                              cmap=my_cmap,
                              alpha=0.5,
                              aspect='auto',
                              interpolation='nearest')

                plt.draw()
                unreasonableanswer = True
                while unreasonableanswer:
                    answer = input(
                        "What would you like to do? [a]dd masking, [s]ubtract masking, [r]efit using this mask, [f]inish?\n   (I'd like to) "
                    )
                    unreasonableanswer = False
                    if 'a' in answer:
                        self.speak(
                            "  Click at the two corners of a box you'd like to mask.\n"
                        )
                        clicks = ip.getMouseClicks(2)

                        rows = (clicks[0].ydata, clicks[1].ydata)
                        top = int(np.max(rows))
                        bottom = np.maximum(int(np.min(rows)), 0)

                        columns = (clicks[0].xdata, clicks[1].xdata)
                        right = int(np.max(columns))
                        left = np.maximum(int(np.min(columns)), 0)

                        mask[bottom:top, left:right] = mask[
                            bottom:top,
                            left:right] | self.bins[0].tlc.flags['custom']

                    elif 's' in answer:
                        self.speak(
                            "  Click at the two corners of a box you'd like to unmask.\n"
                        )
                        clicks = ip.getMouseClicks(2)

                        rows = np.round((clicks[0].ydata, clicks[1].ydata))
                        top = int(np.max(rows))
                        bottom = np.maximum(int(np.min(rows)), 0)

                        columns = np.round((clicks[0].xdata, clicks[1].xdata))
                        right = int(np.max(columns))
                        left = np.maximum(int(np.min(columns)), 0)

                        mask[bottom:top, left:right] -= mask[
                            bottom:top,
                            left:right] & self.bins[0].tlc.flags['custom']

                    elif 'r' in answer:
                        self.speak("Okay, refitting. It may take a while!")
                        self.mask = mask
                        self.applyMask(maskname=maskname)
                        self.fitRigid()
                    elif 'f' in answer:
                        keepgoing = False
                    else:
                        unreasonableanswer = True
                        self.speak(
                            "  I'm sorry, I didn't quite understand that.")

        # set the mask and maskname attributes
        self.maskname = maskname
        self.mask = mask

        # save the mask to the mask directory
        self.saveMask()

        # loop through the light curves and apply the mask to each
        self.applyMask(maskname=maskname)

    def saveMask(self):
        "Save the masking array for this particular mask."
        self.speak('saving mask "{0}" to {1}'.format(self.maskname,
                                                     self.maskdirectory))
        np.save(self.maskdirectory + 'mask.npy', self.mask)

    def applyMask(self, maskname=None):
        "Load a mask from this mask directory and apply it to the light curves."

        # update the maskname attribute to the desired one
        if maskname is not None:
            self.maskname = maskname

        # load the mask from the masking directory (probably slows things down a bit, but ensures everything links up)
        try:
            self.mask = np.load(self.maskdirectory + 'mask.npy')
            self.speak('loaded mask from {0}'.format(self.maskdirectory))
        except IOError:
            self.speak(
                "couldn't load requested mask '{0}', so reverting to default".
                format(self.maskname))
            self.createMask(empty=True)

        # loop through all the bins, and apply the mask to the individual lightcurves
        nBins = len(self.bins)
        for i in range(nBins):
            b = self.bins[i]
            try:
                b.tlc
            except AttributeError:
                b.readTLC()
            b.tlc.bad = self.mask[i, :]

    def fit(self, planet, star, instrument, plot=True):
        '''Take an input planet, star, and instrument; and do a fit across all wavelength bins.'''
        self.rp_over_rs = np.zeros(len(self.bins))
        self.uncertainty = np.zeros_like(self.rp_over_rs)
        self.wavelengths = np.zeros_like(self.rp_over_rs)

        #for i in range(len(self.bins)):
        #	b = self.bins[i]
        #	b.fit(planet, star, instrument, plot=plot)
        #	self.rp_over_rs[i] = b.tm.planet.rp_over_rs.value
        ##	self.wavelengths[i] = b.wavelength

    def load(self, method='lm'):
        '''Load light curves and fits for this transmission spectrum.'''

        self.wavelengths = np.zeros(len(self.bins))
        self.fitted, self.uncertainty = {}, {}

        for i in np.arange(len(self.bins)):
            # select an individual bin
            bin = self.bins[i]
            # load its TLC and TM
            bin.load()
            # load (or create) the fit
            if method == 'mcmc':
                bin.tm.slowfit(plot=False, remake=False)
            else:
                bin.tm.fastfit(plot=False, remake=False)

            # loop over the parameters, and store them in the spectrum
            for parameter in bin.tm.fitter.pdf.parameters:
                key = parameter.name
                try:
                    self.fitted[key][i] = parameter.value
                    self.uncertainty[key][i] = parameter.uncertainty
                except KeyError:
                    self.fitted[key] = np.full(len(self.bins), None)
                    self.uncertainty[key] = np.full(len(self.bins), None)
                    self.fitted[key][i] = parameter.value
                    self.uncertainty[key][i] = parameter.uncertainty

            self.wavelengths[i] = bin.wavelength

    def setupInitialConditions(self):

        self.initial = {}
        self.initial['planet'] = Planet(J=0.0, \
            k=0.107509268533, \
            rs_over_a =0.136854018274, \
            b =0.143228040337, \
            q=0.0, \
            period=3.95023867775, \
            t0=2456416.39659, \
            dt = -0.000109927092499, \
            esinw=0.0, \
            ecosw=0.0)
        self.initial['star'] = Star(u1=0.47,
                                    u2=0.33,
                                    temperature=6170.0,
                                    logg=4.27,
                                    metallicity=0.26)
        self.initial['instrument'] = Instrument(self.bins[0].tlc, order=2)

        self.speak('using LDTk to estimate limb-darkening coefficients')

        self.ldtk_filename = os.path.join(self.binningdirectory,
                                          'ldtk_coefs.npy')
        try:
            self.ldtk_coefs, self.ldtk_coefuncertainties = np.load(
                self.ldtk_filename)
            self.speak('loaded LD coefficients from {}'.format(
                self.ldtk_filename))
        except IOError:
            # create some filters for the limb-darkening
            self.ldtk_filters = [
                BoxcarFilter(b.identifier, b.left / 10, b.right / 10)
                for b in self.bins
            ]

            # set up the profile creator
            self.ldtk_sc = LDPSetCreator(
                teff=(6170, 80),  # Define your star, and the code
                logg=(4.27, 0.07),  # downloads the uncached stellar
                z=(0.26, 0.15),  # spectra from the Husser et al.
                filters=self.ldtk_filters)  # FTP server automatically.

            self.ldtk_profiles = self.ldtk_sc.create_profiles()

            self.ldtk_coefs, self.ldtk_coefuncertainties = self.ldtk_profiles.coeffs_qd(
                do_mc=False)
            np.save(self.ldtk_filename,
                    (self.ldtk_coefs, self.ldtk_coefuncertainties))
            self.speak('saved new LD coefficients to {}'.format(
                self.ldtk_filename))

        # initialize a limb-darkening object
        #self.ld = LD.LD(temperature=self.initial['star'].temperature.value, gravity=self.initial['star'].logg.value, metallicity=self.initial['star'].metallicity.value, directory = self.binningdirectory, plot=True)

    def setupFit(self,
                 label='fixedGeometry',
                 maskname='defaultMask',
                 remake=True):

        # set the label to the kind of fit
        self.label = label

        self.applyMask(maskname)

        # make sure some initial conditions are set
        self.setupInitialConditions()

        # pull out the initial planet, star, and instrument
        p, s, i = self.initial['planet'], self.initial['star'], self.initial[
            'instrument']

        instrumentallimits = [-0.05, 0.05]
        # modify these according to what kind of a fit we want to use
        if label == 'fixedGeometry' or label == 'floatingLD':

            # float the radius ratio
            p.k.float(limits=[0.05, 0.15])

            # a constant baseline
            i.C.float(value=1.0, limits=[0.5, 1.5])

            # instrument rotator angle (seems to matter!)
            i.rotatore_tothe1.float(value=0.002, limits=instrumentallimits)

            # width of the whole spectrum, and the width in the wavelength range
            i.width_target_tothe1.float(value=0.002, limits=instrumentallimits)
            i.dwidth_target_tothe1.float(value=0.002,
                                         limits=instrumentallimits)

            # cross-dispersion centroid of the whole spectrum, and in the wavelength range
            i.centroid_target_tothe1.float(value=0.002,
                                           limits=instrumentallimits)
            i.dcentroid_target_tothe1.float(value=0.002,
                                            limits=instrumentallimits)

            # applied wavelength offset
            i.shift_target_tothe1.float(value=0.002, limits=instrumentallimits)

            # the sky brightness and peak counts for the target
            i.sky_target_tothe1.float(value=0.002, limits=instrumentallimits)
            i.peak_target_tothe1.float(value=0.002, limits=instrumentallimits)

            # allow the limbdarkening to float [a prior for each bin will be set later]
            if label == 'floatingLD':
                s.u1.float(value=s.u1.value, limits=[0.0, 1.0])
                s.u2.float(value=s.u2.value, limits=[0.0, 1.0])
            else:
                s.u1.fix(value=s.u1.value)
                s.u2.fix(value=s.u2.value)

            return

        if label == 'floatingGeometry':
            p.rs_over_a.float(value=0.14, limits=[0.0, 0.3], shrink=1000.0)
            p.k.float(limits=[0.05, 0.15])
            p.b.float(value=0.8, limits=[0.0, 1.0], shrink=1000.0)
            p.dt.float(limits=np.array([-0.01, 0.01]))
            i.C.float(value=1.0, limits=[0.9, 1.1])
            i.rotatore_tothe1.float(value=0.002, limits=instrumentallimits)
            i.width_target_tothe1.float(value=0.002, limits=instrumentallimits)
            i.centroid_target_tothe1.float(value=0.002,
                                           limits=instrumentallimits)
            i.shift_target_tothe1.float(value=0.002, limits=instrumentallimits)
            i.dwidth_target_tothe1.float(value=0.002,
                                         limits=instrumentallimits)
            i.dcentroid_target_tothe1.float(value=0.002,
                                            limits=instrumentallimits)
            i.sky_target_tothe1.float(value=0.002, limits=instrumentallimits)
            i.peak_target_tothe1.float(value=0.002, limits=instrumentallimits)
            return

        raise ValueError('Unrecognised label {}'.format(label))

    def psi(self):
        """Quick wrapper to spit out a tuple of the initial conditions."""
        try:
            return self.initial['planet'], self.initial['star'], self.initial[
                'instrument']
        except AttributeError:
            self.setupFit()
            return self.initial['planet'], self.initial['star'], self.initial[
                'instrument']

    def fitBins(self,
                label='fixedGeometry',
                maskname='defaultMask',
                remake=False,
                slow=False,
                plot=False,
                **kw):
        self.speak('about to fit {0} bins with:'.format(len(self.bins)))
        for k in locals().keys():
            self.speak('   {0} = {1}'.format(k, locals()[k]))
        #self.input('are you okay with that?')

        self.setupFit(label=label, maskname=maskname, remake=True)

        if label == 'floatingLD':
            kw['ldpriors'] = False

        assert (self.label == label)
        for b in self.bins:
            b.fit(plot=plot,
                  slow=slow,
                  remake=remake,
                  label=label,
                  maskname=maskname,
                  **kw)
            assert (self.label == label)
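
A hypothetical end-to-end sketch (it assumes a spectroscopic cube object,
here called cube, produced by the surrounding pipeline):

spectrum = WithTLCs(cube)                # builds and loads the binned light curves
spectrum.fitBins(label='fixedGeometry')  # fit every wavelength bin
spectrum.load()                          # gather fitted parameters per bin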