def plot_flux_vs_time(self, ax=None):
    # Mark the expected transit centres over the full light curve.
    transits = self.zero_epoch + unique(epoch(self.time, self.zero_epoch, self.period)) * self.period
    for t in transits:
        ax.axvline(t, ls='--', alpha=0.5, lw=1)

    # Plot the unbinned flux and overlay a 1 h binned version (see the binning sketch below).
    ax.plot(self.time, self.flux)
    tb, fb, eb = downsample_time(self.time, self.flux, 1 / 24)
    ax.plot(tb, fb, 'k', lw=1)
    setp(ax, xlabel='Time [BJD]', ylabel='Normalized flux', xlim=self.time[[0, -1]])
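# The downsample_time helper used above is not defined in this snippet. It is assumed to bin
# the light curve into fixed-width time bins (here 1/24 d = 1 h) and return the binned times,
# fluxes, and flux uncertainties. A minimal sketch under that assumption (downsample_time_sketch
# is hypothetical and for illustration only, not the pipeline's implementation):
def downsample_time_sketch(time, flux, width):
    from numpy import arange, array, digitize, nanmean, nanstd, sqrt
    edges = arange(time.min(), time.max() + width, width)
    bids = digitize(time, edges) - 1
    tb, fb, eb = [], [], []
    for i in range(edges.size - 1):
        m = bids == i
        if m.any():
            tb.append(nanmean(time[m]))
            fb.append(nanmean(flux[m]))
            eb.append(nanstd(flux[m]) / sqrt(m.sum()))
    return array(tb), array(fb), array(eb)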
def plot_flux_vs_time(self, ax=None):
    # Percentiles (0.5 %, 99.5 %, and the median) used to set the vertical extent of the panel.
    rpc = percentile(self.flux_raw, [0.5, 99.5, 50])
    dpc = percentile(self.flux_detrended, [0.5, 99.5, 50])
    d = 1.15
    bbox_raw = rpc[-1] + d * (rpc[0] - rpc[-1]), rpc[-1] + d * (rpc[1] - rpc[-1])
    bbox_dtr = dpc[-1] + d * (dpc[0] - dpc[-1]), dpc[-1] + d * (dpc[1] - dpc[-1])
    offset = d * (rpc[1] - rpc[-1]) - d * (dpc[0] - dpc[-1])

    # Plot the detrended flux shifted above the raw flux.
    ax.plot(self.time_detrended - self.bjdrefi, self.flux_detrended + 1.2 * offset, label='detrended')
    ax.plot(self.time_raw - self.bjdrefi, self.flux_raw, label='raw')

    # Mark the expected transit centres and add an epoch axis if an ephemeris is available.
    if self.zero_epoch:
        transits = self.zero_epoch + unique(epoch(self.time, self.zero_epoch, self.period)) * self.period
        for t in transits:
            if self.time[0] < t < self.time[-1]:
                ax.axvline(t - self.bjdrefi, ls='--', alpha=0.5, lw=1)

        def time2epoch(x):
            return (x + self.bjdrefi - self.zero_epoch) / self.period

        def epoch2time(x):
            return self.zero_epoch - self.bjdrefi + x * self.period

        secax = ax.secondary_xaxis('top', functions=(time2epoch, epoch2time))
        secax.set_xlabel('Epoch')
        secax.xaxis.set_major_locator(MaxNLocator(integer=True))

    ax.legend(loc='upper right')
    ax.autoscale(axis='x', tight=True)
    yp_offset = diff(ax.transData.inverted().transform([[0, 0], [0, 40]])[:, 1])
    setp(ax, xlabel=f'Time - {self.bjdrefi} [BJD]', ylabel='Normalized flux',
         ylim=(bbox_raw[0] - yp_offset, bbox_dtr[1] + offset + yp_offset))
def __call__(self, *args, **kwargs):
    self.logger = getLogger(f"{self.name}:{self.ts.name.lower().replace('_', '-')}")
    self.logger.info("Running BLS periodogram")

    # Compute the BLS periodogram over a linear grid of trial periods.
    self._periods = linspace(self.ts.pmin, self.ts.pmax, self.ts.nper)
    self.bls = BoxLeastSquares(self.ts.time * u.day, self.ts.flux, self.ts.ferr)
    self.result = self.bls.power(self._periods, self._durations, objective='snr')

    # Suppress the periodogram around the periods that should be ignored.
    for p in self.ts.masked_periods:
        self.result.depth_snr *= maskf(self._periods, p, .1)
        self.result.log_likelihood *= maskf(self._periods, p, .1)

    # Choose the period with the highest depth SNR and derive the ephemeris from it.
    i = argmax(self.result.depth_snr)
    self.period = self.result.period[i].value
    self.snr = self.result.depth_snr[i]
    self.duration = self.result.duration[i].value
    self.depth = self.result.depth[i]

    t0 = self.result.transit_time[i].value
    ep = epoch(self.ts.time.min(), t0, self.period)
    self.zero_epoch = t0 + ep * self.period

    self.ts.update_ephemeris(self.zero_epoch, self.period, self.duration, self.depth)
    self.logger.info(f"BLS SNR {self.snr:.2f} period {self.period:.2f} d, duration {24 * self.duration:.2f} h")
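# Neither epoch nor maskf is defined in this snippet. epoch is assumed to return the integer
# orbit number of a time stamp relative to a reference ephemeris, and maskf is assumed to be a
# multiplicative window that zeroes the periodogram close to a given period. Minimal sketches
# under those assumptions (the _sketch names are hypothetical, not the pipeline's helpers):
def epoch_sketch(time, zero_epoch, period):
    from numpy import rint
    return rint((time - zero_epoch) / period).astype(int)

def maskf_sketch(periods, masked_period, rel_width):
    from numpy import abs, where
    return where(abs(periods - masked_period) < rel_width * masked_period, 0.0, 1.0)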
def plot_flux_vs_time(self, ax=None):
    transits = self.zero_epoch + unique(epoch(self.time, self.zero_epoch, self.period)) * self.period
    for t in transits:
        ax.axvline(t - self.bjdrefi, ls='--', alpha=0.5, lw=1)

    # Vertical offsets that stack the detrended flux, the two trend components, and the raw flux.
    offset1 = -1.05 * (self.trtime - self.fraw).min()
    offset2 = offset1 - 1.05 * (self.trposi - self.trtime).min()
    offset3 = offset2 - 1.05 * (self.flux - self.trposi).min()

    time = self.time - self.bjdrefi
    ax.plot(time, self.flux + offset3, label='Detrended flux')
    ax.plot(time, self.trposi + offset2, label='Rotation trend')
    ax.plot(time, self.trtime + offset1, label='Time trend')
    ax.plot(time, self.fraw, label='PDC Flux')
    ax.legend(loc='upper right')
    ax.autoscale(axis='x', tight=True)
    setp(ax, xlabel=f'Time - {self.bjdrefi} [BJD]', ylabel='Normalized flux')
def plot_gb_transits(self, method='de', pv: ndarray = None, figsize: tuple = (14, 2), axes=None,
                     ncol: int = 4, xlim: tuple = None, ylim: tuple = None):
    if pv is None:
        if method == 'de':
            pv = self.de.minimum_location
        else:
            raise NotImplementedError

    nlc = self.nlc - self._stess
    nrow = int(floor(nlc / ncol))

    if axes is None:
        fig, axs = subplots(nrow, ncol, figsize=figsize, constrained_layout=True,
                            sharex='all', sharey='all', squeeze=False)
    else:
        fig, axs = None, axes

    for ax in axs.flat:
        ax.autoscale(enable=True, axis='x', tight=True)

    fmodel = squeeze(self.flux_model(pv))
    etess = self._stess
    t0, p = self.de.minimum_location[[0, 1]]

    for i, ax in enumerate(axs.T.flat):
        t = self.times[etess + i]
        e = epoch(t.mean(), t0, p)
        tc = t0 + e * p
        tt = 24 * (t - tc)
        ax.plot(tt, self.fluxes[etess + i], 'k.', alpha=0.2)
        ax.plot(tt, fmodel[self.lcslices[etess + i]], 'k')

    setp(axs, xlim=xlim, ylim=ylim)
    setp(axs[-1, :], xlabel='Time - T$_c$ [h]')
    setp(axs[:, 0], ylabel='Normalised flux')
    return fig
def plot_m2_transits(self, figsize=(14, 5)):
    fig, axs = subplots(3, 4, figsize=figsize, constrained_layout=True, sharex='all', sharey='all')
    fmodel = squeeze(self.flux_model(self.de.population))[self.de.minimum_index]
    etess = self._stess
    t0, p = self.de.minimum_location[[0, 1]]

    for i, ax in enumerate(axs.T.flat):
        t = self.times[etess + i]
        e = epoch(t.mean(), t0, p)
        tc = t0 + e * p
        ax.plot(t - tc, self.fluxes[etess + i], 'k.', alpha=0.2)
        ax.plot(t - tc, fmodel[self.lcslices[etess + i]], 'k')
        setp(ax, xlim=(-0.045, 0.045))

    setp(axs, ylim=(0.92, 1.05))
    setp(axs[-1, :], xlabel='Time - T$_c$ [d]')
    setp(axs[:, 0], ylabel='Normalised flux')
    return fig
def _compute_indices(self):
    self.qids = unique(self.qidarr)
    self.tids = unique(self.tidarr)
    self.nt = len(self.tids)
    self.npt = self.time.size

    # Slices into the concatenated arrays for each qid, both as a list and keyed by qid.
    self.qslices = [slice(*where(self.qidarr == qid)[0][[0, -1]] + [0, 1]) for qid in self.qids]
    self.qsldict = {qid: slice(*where(self.qidarr == qid)[0][[0, -1]] + [0, 1]) for qid in self.qids}

    # Remap the transit ids to a contiguous zero-based range.
    for i, tid in enumerate(self.tids):
        self.tidarr[self.tidarr == tid] = i
    self.tids = unique(self.tidarr)

    # Per-transit slices and the orbit number of each transit.
    self.tslices = [slice(*where(self.tidarr == tid)[0][[0, -1]] + [0, 1]) for tid in self.tids]
    self.orbit_n = array([epoch(t.mean(), self.t0, self.p) for t in self.time_per_transit])
def __call__(self, npop: int = 40, de_niter: int = 1000, mcmc_niter: int = 200, mcmc_repeats: int = 3,
             initialize_only: bool = False):
    self.logger = getLogger(f"{self.name}:{self.ts.name.lower().replace('_', '-')}")
    self.logger.info(f"Fitting {self.mode} transits")

    self.ts.transit_fits[self.mode] = self

    # Select the epochs to fit: all transits, or only the even or odd ones.
    epochs = epoch(self.ts.time, self.ts.zero_epoch, self.ts.period)
    if self.mode == 'all':
        mask = ones(self.ts.time.size, bool)
    elif self.mode == 'even':
        mask = epochs % 2 == 0
    elif self.mode == 'odd':
        mask = epochs % 2 == 1
    else:
        raise NotImplementedError

    # Keep only the points within two transit durations of the expected transit centres.
    mask &= abs(self.ts.phase - 0.5 * self.ts.period) < 4 * 0.5 * self.ts.duration

    self.ts.transit_fit_masks[self.mode] = self.mask = mask
    self.epochs = epochs = epochs[mask]
    self.time = self.ts.time[mask]
    self.fobs = self.ts.flux[mask]
    tref = floor(self.time.min())

    tm = QuadraticModelCL(klims=(0.01, 0.60)) if self.use_opencl else QuadraticModel(interpolate=False)
    self.lpf = lpf = SearchLPF(times=self.time, fluxes=self.fobs, epochs=epochs, tm=tm,
                               nsamples=self.nsamples, exptimes=self.exptime, tref=tref)

    # TODO: V-shaped transits are not always modelled well. Need to set smarter priors (or starting population)
    #       for the impact parameter and stellar density.
    lpf.set_prior('rho', 'UP', 0.01, 25)

    if self.mode == 'all':
        d = min(self.ts.depth, 0.75)
        lpf.set_prior('tc', 'NP', self.ts.zero_epoch, 0.01)
        lpf.set_prior('p', 'NP', self.ts.period, 0.001)
        lpf.set_prior('k2', 'UP', max(0.01 ** 2, 0.5 * d), min(max(0.08 ** 2, 4 * d), 0.75 ** 2))
    else:
        pr = self.ts.tf_all.parameters
        lpf.set_prior('tc', 'NP', pr.tc.med, 5 * pr.tc.err)
        lpf.set_prior('p', 'NP', pr.p.med, pr.p.err)
        lpf.set_prior('k2', 'UP', max(0.01 ** 2, 0.5 * pr.k2.med), max(0.08 ** 2, min(0.6 ** 2, 2 * pr.k2.med)))
        lpf.set_prior('q1', 'NP', pr.q1.med, pr.q1.err)
        lpf.set_prior('q2', 'NP', pr.q2.med, pr.q2.err)

    # TODO: The limb darkening table has been computed for TESS. Needs to be made flexible.
    if self.ts.teff is not None:
        ldcs = Table.read(Path(__file__).parent / "data/ldc_table.fits").to_pandas()
        ip = interp1d(ldcs.teff, ldcs[['q1', 'q2']].T)
        q1, q2 = ip(clip(self.ts.teff, 2000., 12000.))
        lpf.set_prior('q1', 'NP', q1, 1e-5)
        lpf.set_prior('q2', 'NP', q2, 1e-5)

    if initialize_only:
        return
    else:
        lpf.optimize_global(niter=de_niter, npop=npop, use_tqdm=self.use_tqdm, plot_convergence=False)
        lpf.sample_mcmc(mcmc_niter, repeats=mcmc_repeats, use_tqdm=self.use_tqdm, leave=False)

        df = lpf.posterior_samples(derived_parameters=True)
        df = pd.DataFrame((df.median(), df.std()), index='med err'.split())
        pv = lpf.posterior_samples(derived_parameters=False).median().values
        self.phase = fold(self.time, pv[1], pv[0], 0.5) * pv[1] - 0.5 * pv[1]
        self.fmod = lpf.flux_model(pv)
        self.ftra = lpf.transit_model(pv)
        self.fbase = lpf.baseline(pv)

        # Calculate the per-orbit log likelihood differences
        # --------------------------------------------------
        ues = unique(epochs)
        lnl = zeros(ues.size)
        err = 10 ** pv[7]

        def lnlike_normal(o, m, e):
            npt = o.size
            return -npt * log(e) - 0.5 * npt * log(2. * pi) - 0.5 * sum((o - m) ** 2 / e ** 2)

        for i, e in enumerate(ues):
            m = epochs == e
            lnl[i] = lnlike_normal(self.fobs[m], self.fmod[m], err) - lnlike_normal(self.fobs[m], 1.0, err)

        self.parameters = df
        self.dll_epochs = ues
        self.dll_values = lnl

        self.zero_epoch = df.tc.med
        self.period = df.p.med
        self.duration = df.t14.med
        self.depth = df.k2.med

        if self.mode == 'all':
            self.delta_bic = self.ts.dbic = delta_bic(lnl.sum(), 0, 9, self.time.size)
            self.ts.update_ephemeris(self.zero_epoch, self.period, self.duration, self.depth)
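# delta_bic is not defined in this snippet. It is assumed to compare the transit model against
# a constant-flux model with the Bayesian Information Criterion, BIC = k ln(n) - 2 ln(L), using
# the summed per-orbit log-likelihood difference computed above. A sketch of one common
# convention (delta_bic_sketch is hypothetical; the actual helper's argument order and sign
# convention may differ):
def delta_bic_sketch(dlnl, k_constant, k_transit, npt):
    from numpy import log
    # dlnl: log-likelihood improvement of the transit model over the constant-flux model
    bic_constant = k_constant * log(npt)
    bic_transit = k_transit * log(npt) - 2.0 * dlnl
    return bic_constant - bic_transit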
def plot_gb_transits(self, method='de', pv: ndarray = None, remove_baseline: bool = True,
                     figsize: tuple = (14, 2), axes=None, ncol: int = 4, xlim: tuple = None,
                     ylim: tuple = None, nsamples: int = 200):
    if pv is None:
        if method == 'de':
            if self.de is None:
                raise ValueError("The global optimizer hasn't been initialized.")
            pvp = None
            pv = self.de.minimum_location
        elif method == 'mcmc':
            if self.sampler is None:
                raise ValueError("The sampler hasn't been initialized.")
            df = self.posterior_samples(derived_parameters=False)
            pvp = permutation(df.values)[:nsamples, :]
            pv = median(pvp, 0)
    else:
        if pv.ndim == 1:
            pvp = None
        else:
            pvp = permutation(pv)[:nsamples, :]
            pv = median(pvp, 0)

    if pvp is None:
        if remove_baseline:
            fobs = self.ofluxa / squeeze(self.baseline(pv))
            fmodel = squeeze(self.transit_model(pv))
            fbasel = ones_like(self.ofluxa)
        else:
            fobs = self.ofluxa
            fmodel = squeeze(self.flux_model(pv))
            fbasel = squeeze(self.baseline(pv))
        fmodel_limits = None
    else:
        if remove_baseline:
            fobs = self.ofluxa / squeeze(self.baseline(pv))
            fmodels = percentile(self.transit_model(pvp), [50, 16, 84, 2.5, 97.5], 0)
            fbasel = ones_like(self.ofluxa)
        else:
            fobs = self.ofluxa
            fmodels = percentile(self.flux_model(pvp), [50, 16, 84, 2.5, 97.5], 0)
            fbasel = median(self.baseline(pvp), 0)
        fmodel = fmodels[0]
        fmodel_limits = fmodels[1:]

    tcids = [self.ps.names.index(f'tc_{i + 1}') for i in range(self.nplanets)]
    prids = [self.ps.names.index(f'p_{i + 1}') for i in range(self.nplanets)]
    t0s = pv[tcids]
    prs = pv[prids]

    # Assign each light curve to the planet whose transit centre falls closest to it.
    tcs = array([t.mean() for t in self.times[self._stess:]])
    tds = array([abs(fold(tcs, prs[i], t0s[i], 0.5) - 0.5) for i in range(self.nplanets)])
    pids = argmin(tds, 0)

    nlc = self.nlc - self._stess
    nrow = int(ceil(nlc / ncol))

    if axes is None:
        fig, axs = subplots(nrow, ncol, figsize=figsize, sharex='all', sharey='all', squeeze=False)
    else:
        fig, axs = None, axes

    for ax in axs.flat:
        ax.autoscale(enable=True, axis='x', tight=True)

    etess = self._stess
    for iax, i in enumerate(range(self.nlc - etess)):
        ax = axs.flat[iax]
        sl = self.lcslices[etess + i]
        t = self.times[etess + i]
        e = epoch(t.mean(), t0s[pids[i]], prs[pids[i]])
        tc = t0s[pids[i]] + e * prs[pids[i]]
        tt = 24 * (t - tc)
        if fmodel_limits is not None:
            ax.fill_between(tt, fmodel_limits[2, sl], fmodel_limits[3, sl], facecolor='blue', alpha=0.15)
            ax.fill_between(tt, fmodel_limits[0, sl], fmodel_limits[1, sl], facecolor='darkblue', alpha=0.25)
        ax.plot(tt, fobs[sl], 'k.', alpha=0.2)
        ax.plot(tt, fmodel[sl], 'k')

    setp(axs, xlim=xlim, ylim=ylim)
    setp(axs[-1, :], xlabel='Time - T$_c$ [h]')
    setp(axs[:, 0], ylabel='Normalised flux')

    if fig is not None:
        fig.tight_layout()
    return fig
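# fold (used above and in the transit-fit step) is assumed to phase-fold times over a period,
# mapping each time stamp to a normalized phase in [0, 1) with an optional shift of the
# reference epoch. A minimal sketch under that assumption (fold_sketch is hypothetical):
def fold_sketch(time, period, origo=0.0, shift=0.0):
    return ((time - origo) / period + shift) % 1.0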
def plot_gb_transits(self, solution: str = 'de', pv: ndarray = None, figsize: tuple = None, axes=None,
                     ncol: int = 4, xlim: tuple = None, ylim: tuple = None,
                     remove_baseline: bool = True, n_samples: int = 1500):
    solution = solution.lower()
    samples = None

    if pv is None:
        if solution == 'local':
            pv = self._local_minimization.x
        elif solution in ('de', 'global'):
            solution = 'global'
            pv = self.de.minimum_location
        elif solution in ('mcmc', 'mc'):
            solution = 'mcmc'
            samples = self.posterior_samples(derived_parameters=False)
            samples = permutation(samples.values)[:n_samples]
            pv = median(samples, 0)
        else:
            raise NotImplementedError("'solution' should be either 'local', 'global', or 'mcmc'")

    nlc = self.nlc - self._stess
    nrow = int(ceil(nlc / ncol))

    if axes is None:
        fig, axs = subplots(nrow, ncol, figsize=figsize, sharex='all', sharey='all', squeeze=False)
    else:
        fig, axs = None, axes

    for ax in axs.flat:
        ax.autoscale(enable=True, axis='x', tight=True)

    if remove_baseline:
        if solution == 'mcmc':
            fbasel = median(self.baseline(samples), axis=0)
            fmodel, fmodm, fmodp = percentile(self.transit_model(samples), [50, 0.5, 99.5], axis=0)
        else:
            fbasel = squeeze(self.baseline(pv))
            fmodel, fmodm, fmodp = squeeze(self.transit_model(pv)), None, None
        fobs = self.ofluxa / fbasel
    else:
        if solution == 'mcmc':
            fbasel = median(self.baseline(samples), axis=0)
            fmodel, fmodm, fmodp = percentile(self.flux_model(samples), [50, 1, 99], axis=0)
        else:
            fbasel = squeeze(self.baseline(pv))
            fmodel, fmodm, fmodp = squeeze(self.flux_model(pv)), None, None
        fobs = self.ofluxa

    etess = self._stess
    t0, p = pv[[0, 1]]

    for i in range(nlc):
        ax = axs.flat[i]
        sl = self.lcslices[etess + i]
        t = self.times[etess + i]
        e = epoch(t.mean(), t0, p)
        tc = t0 + e * p
        tt = 24 * (t - tc)
        ax.plot(tt, fobs[sl], 'k.', alpha=0.2)
        ax.plot(tt, fmodel[sl], 'k')
        if solution == 'mcmc':
            ax.fill_between(tt, fmodm[sl], fmodp[sl], zorder=-100, alpha=0.2, fc='k')
        if not remove_baseline:
            ax.plot(tt, fbasel[sl], 'k--', alpha=0.2)

    setp(axs, xlim=xlim, ylim=ylim)
    setp(axs[-1, :], xlabel='Time - T$_c$ [h]')
    setp(axs[:, 0], ylabel='Normalised flux')

    if fig is not None:
        fig.tight_layout()
    return fig