def cov(self, x1, x2=None):
    if x2 is None:
        xx = self.metric.gram(x1, x1)
        debug(tt.arcsin(2*xx/((1 + 2*xx)**2)), 'xx')
        return self.var * tt.arcsin(2*xx/((1 + 2*xx)**2))
    else:
        return self.var * tt.arcsin(
            2*self.metric.gram(x1, x2) /
            ((1 + 2*self.metric.gram(x1, x1)) *
             (1 + 2*self.metric.gram(x2, x2))))
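The same arcsine-kernel expression can be sanity-checked outside the Theano graph; the snippet below is a hypothetical standalone NumPy version, with a plain dot product standing in for self.metric.gram and a unit variance for self.var.

# Hypothetical NumPy cross-check of the arcsine-kernel expression used in cov()
# above; `gram` stands in for self.metric.gram and `var` for self.var.
import numpy as np

def arcsine_cov(x1, x2=None, var=1.0, gram=np.dot):
    if x2 is None:
        xx = gram(x1, x1)
        return var * np.arcsin(2 * xx / ((1 + 2 * xx) ** 2))
    return var * np.arcsin(
        2 * gram(x1, x2) / ((1 + 2 * gram(x1, x1)) * (1 + 2 * gram(x2, x2))))

print(arcsine_cov(np.array([0.1, 0.2]), np.array([0.3, -0.1])))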
def get_stencil(self, t, r=None, texp=None):
    if r is None or texp is None:
        return tt.shape_padright(t)

    z = tt.zeros_like(self.a)
    r = tt.as_tensor_variable(r)
    R = self.r_star + z
    hp = 0.5 * self.period

    if self.ecc is None:
        # Equation 14 from Winn (2010)
        k = r / self.r_star
        arg1 = tt.square(1 + k) - tt.square(self.b)
        arg2 = tt.square(1 - k) - tt.square(self.b)
        factor = R / (self.a * self.sin_incl)
        hdur1 = hp * tt.arcsin(factor * tt.sqrt(arg1)) / np.pi
        hdur2 = hp * tt.arcsin(factor * tt.sqrt(arg2)) / np.pi
        ts = [-hdur1, -hdur2, hdur2, hdur1]
        flag = z
    else:
        M_contact1 = self.contact_points_op(
            self.a, self.ecc, self.cos_omega, self.sin_omega,
            self.cos_incl + z, self.sin_incl + z, R + r)
        M_contact2 = self.contact_points_op(
            self.a, self.ecc, self.cos_omega, self.sin_omega,
            self.cos_incl + z, self.sin_incl + z, R - r)
        flag = M_contact1[2] + M_contact2[2]

        ts = [
            tt.mod((M_contact1[0] - self.M0) / self.n + hp, self.period) - hp,
            tt.mod((M_contact2[0] - self.M0) / self.n + hp, self.period) - hp,
            tt.mod((M_contact2[1] - self.M0) / self.n + hp, self.period) - hp,
            tt.mod((M_contact1[1] - self.M0) / self.n + hp, self.period) - hp,
        ]

    start = self.period * tt.floor((tt.min(t) - self.t0) / self.period)
    end = self.period * (tt.ceil((tt.max(t) - self.t0) / self.period) + 1)
    start += self.t0
    end += self.t0

    tout = []
    for i in range(4):
        if z.ndim < 1:
            tout.append(ts[i] + tt.arange(start, end, self.period))
        else:
            tout.append(theano.scan(
                fn=lambda t0, s0, e0, p0: t0 + tt.arange(s0, e0, p0),
                sequences=[ts[i], start, end, self.period],
            )[0].flatten())

    ts = tt.sort(tt.concatenate(tout))
    return ts, flag
def in_transit(self, t, r=0.0, texp=None):
    """Get a list of timestamps that are in transit

    Args:
        t (vector): A vector of timestamps to be evaluated.
        r (Optional): The radii of the planets.
        texp (Optional[float]): The exposure time.

    Returns:
        The indices of the timestamps that are in transit.

    """
    z = tt.zeros_like(self.a)
    r = tt.as_tensor_variable(r) + z
    R = self.r_star + z

    # Wrap the times into time since transit
    hp = 0.5 * self.period
    dt = tt.mod(self._warp_times(t) + hp, self.period) - hp

    if self.ecc is None:
        # Equation 14 from Winn (2010)
        k = r / R
        arg = tt.square(1 + k) - tt.square(self.b)
        factor = R / (self.a * self.sin_incl)
        hdur = hp * tt.arcsin(factor * tt.sqrt(arg)) / np.pi
        t_start = -hdur
        t_end = hdur
        flag = z
    else:
        M_contact = self.contact_points_op(
            self.a, self.ecc, self.cos_omega, self.sin_omega,
            self.cos_incl + z, self.sin_incl + z, R + r,
        )
        flag = M_contact[2]

        t_start = (M_contact[0] - self.M0) / self.n
        t_start = tt.mod(t_start + hp, self.period) - hp

        t_end = (M_contact[1] - self.M0) / self.n
        t_end = tt.mod(t_end + hp, self.period) - hp

        t_start = tt.switch(tt.gt(t_start, 0.0),
                            t_start - self.period, t_start)
        t_end = tt.switch(tt.lt(t_end, 0.0),
                          t_end + self.period, t_end)

    if texp is not None:
        t_start -= 0.5 * texp
        t_end += 0.5 * texp

    mask = tt.any(tt.and_(dt >= t_start, dt <= t_end), axis=-1)
    result = ifelse(tt.all(tt.eq(flag, 0)),
                    tt.arange(t.size)[mask],
                    tt.arange(t.size))

    return result
def in_transit(self, t, r=0.0, texp=None):
    """Get a list of timestamps that are in transit

    Args:
        t (vector): A vector of timestamps to be evaluated.
        r (Optional): The radii of the planets.
        texp (Optional[float]): The exposure time.

    Returns:
        The indices of the timestamps that are in transit.

    """
    z = tt.zeros_like(self.a)
    r = tt.as_tensor_variable(r) + z
    R = self.r_star + z

    # Wrap the times into time since transit
    hp = 0.5 * self.period
    dt = tt.mod(self._warp_times(t) - self.t0 + hp, self.period) - hp

    if self.ecc is None:
        # Equation 14 from Winn (2010)
        k = r / R
        arg = tt.square(1 + k) - tt.square(self.b)
        factor = R / (self.a * self.sin_incl)
        hdur = hp * tt.arcsin(factor * tt.sqrt(arg)) / np.pi
        t_start = -hdur
        t_end = hdur
        flag = z
    else:
        M_contact = self.contact_points_op(
            self.a, self.ecc, self.cos_omega, self.sin_omega,
            self.cos_incl + z, self.sin_incl + z, R + r)
        flag = M_contact[2]

        t_start = (M_contact[0] - self.M0) / self.n
        t_start = tt.mod(t_start + hp, self.period) - hp

        t_end = (M_contact[1] - self.M0) / self.n
        t_end = tt.mod(t_end + hp, self.period) - hp

        t_start = tt.switch(tt.gt(t_start, 0.0),
                            t_start - self.period, t_start)
        t_end = tt.switch(tt.lt(t_end, 0.0),
                          t_end + self.period, t_end)

    if texp is not None:
        t_start -= 0.5 * texp
        t_end += 0.5 * texp

    mask = tt.any(tt.and_(dt >= t_start, dt <= t_end), axis=-1)
    result = ifelse(tt.all(tt.eq(flag, 0)),
                    tt.arange(t.size)[mask],
                    tt.arange(t.size))

    return result
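Both in_transit variants lean on Equation 14 of Winn (2010) for the circular-orbit transit duration; the sketch below is a minimal NumPy rendering of that formula with illustrative parameter values, not values drawn from the snippets above.

# Circular-orbit transit duration, Winn (2010) Eq. 14; the numbers are
# illustrative only (period in days, inclination in radians).
import numpy as np

def transit_duration(period, a_over_Rs, k, b, incl):
    """Full (first-to-fourth contact) duration, in the same units as `period`."""
    arg = np.sqrt((1 + k) ** 2 - b ** 2) / (a_over_Rs * np.sin(incl))
    return (period / np.pi) * np.arcsin(arg)

print(transit_duration(period=3.5, a_over_Rs=10.0, k=0.1, b=0.3,
                       incl=np.radians(88.0)))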
def th_haversine():
    """Returns a reference to the compiled Haversine distance function"""
    from theano import tensor as T
    from theano import function

    from .vectorops import floatX

    coords1 = T.matrix("Coords1", dtype=floatX)
    coords2 = T.matrix("Coords2", dtype=floatX)

    R = np.array([6367], dtype="int32")  # Approximate radius of Mother Earth in kms

    coords1 = T.deg2rad(coords1)
    coords2 = T.deg2rad(coords2)
    lon1, lat1 = coords1[:, 0], coords1[:, 1]
    lon2, lat2 = coords2[:, 0], coords2[:, 1]

    dlon = lon1 - lon2
    dlat = lat1 - lat2
    d = T.sin(dlat / 2) ** 2 + T.cos(lat1) * T.cos(lat2) * T.sin(dlon / 2) ** 2
    e = 2 * T.arcsin(T.sqrt(d))
    d_haversine = e * R

    f_ = function([coords1, coords2], outputs=d_haversine)
    return f_
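A plain-NumPy version of the same haversine formula can serve as a cross-check for the compiled function; this is a standalone sketch (not part of the original module) that assumes coordinates are given as (lon, lat) pairs in degrees.

# Standalone NumPy haversine mirroring the Theano graph above; the example
# coordinates are arbitrary (roughly Budapest and Debrecen).
import numpy as np

def np_haversine(coords1, coords2, radius_km=6367.0):
    lon1, lat1 = np.deg2rad(coords1).T
    lon2, lat2 = np.deg2rad(coords2).T
    d = np.sin((lat1 - lat2) / 2) ** 2 + \
        np.cos(lat1) * np.cos(lat2) * np.sin((lon1 - lon2) / 2) ** 2
    return 2 * radius_km * np.arcsin(np.sqrt(d))

print(np_haversine(np.array([[19.04, 47.50]]), np.array([[21.63, 47.53]])))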
def __call__(self, x1, x2):
    return self.var * tt.arcsin(
        2*self.metric(x1, x2) /
        ((1 + 2*self.metric(x1, x1)) * (1 + 2*self.metric(x2, x2))))
def _get_compiled_theano_functions(N_QUAD_PTS):
    # Planet masses: m1, m2
    m1, m2 = T.dscalars(2)
    mstar = 1
    mu1 = m1 * mstar / (mstar + m1)
    mu2 = m2 * mstar / (mstar + m2)
    Mstar1 = mstar + m1
    Mstar2 = mstar + m2
    beta1 = mu1 * T.sqrt(Mstar1 / mstar) / (mu1 + mu2)
    beta2 = mu2 * T.sqrt(Mstar2 / mstar) / (mu1 + mu2)
    j, k = T.lscalars('j', 'k')
    s = (j - k) / k

    # Angle variable for averaging over
    psi = T.dvector()

    # Quadrature weights
    quad_weights = T.dvector('w')

    # Dynamical variables:
    Ndof = 3
    Nconst = 1
    dyvars = T.vector()
    y1, y2, y_inc, x1, x2, x_inc, amd = [
        dyvars[i] for i in range(2 * Ndof + Nconst)
    ]

    a20 = T.constant(1.)
    a10 = ((j - k) / j)**(2 / 3) * (Mstar1 / Mstar2)**(1 / 3)
    L10 = beta1 * T.sqrt(a10)
    L20 = beta2 * T.sqrt(a20)
    Ltot = L10 + L20
    f = L10 / L20
    L2res = (Ltot + amd) / (1 + f)
    Psi = -k * (s * L2res + (1 + s) * f * L2res)

    ###
    # actions
    ###
    I1 = 0.5 * (x1 * x1 + y1 * y1)
    I2 = 0.5 * (x2 * x2 + y2 * y2)
    Phi = 0.5 * (x_inc * x_inc + y_inc * y_inc)
    L1 = -s * Ltot - Psi / k - s * (I1 + I2 + Phi)
    L2 = (1 + s) * Ltot + Psi / k + (1 + s) * (I1 + I2 + Phi)

    # Set lambda2=0
    l2 = T.constant(0.)
    l1 = -1 * k * psi
    theta_res = (1 + s) * l2 - s * l1
    cos_theta_res = T.cos(theta_res)
    sin_theta_res = T.sin(theta_res)

    kappa1 = x1 * cos_theta_res + y1 * sin_theta_res
    eta1 = y1 * cos_theta_res - x1 * sin_theta_res
    kappa2 = x2 * cos_theta_res + y2 * sin_theta_res
    eta2 = y2 * cos_theta_res - x2 * sin_theta_res
    sigma = x_inc * cos_theta_res + y_inc * sin_theta_res
    rho = y_inc * cos_theta_res - x_inc * sin_theta_res

    # y = (sigma - i*rho)/sqrt(2)
    #   = sqrt(Phi) * exp[i (Omega1+Omega2) / 2]
    # Malige+ 2002, Eqs 20 and 21
    r2byr1 = (L2 - L1 - I2 + I1) / Ltot
    sigma1 = rho * T.sqrt(1 + r2byr1) / T.sqrt(2)
    sigma2 = -rho * T.sqrt(1 - r2byr1) / T.sqrt(2)
    rho1 = -sigma * T.sqrt(1 + r2byr1) / T.sqrt(2)
    rho2 = sigma * T.sqrt(1 - r2byr1) / T.sqrt(2)

    Xre1 = kappa1 / T.sqrt(L1)
    Xim1 = -eta1 / T.sqrt(L1)
    Yre1 = 0.5 * sigma1 / T.sqrt(L1)
    Yim1 = -0.5 * rho1 / T.sqrt(L1)

    Xre2 = kappa2 / T.sqrt(L2)
    Xim2 = -eta2 / T.sqrt(L2)
    Yre2 = 0.5 * sigma2 / T.sqrt(L2)
    Yim2 = -0.5 * rho2 / T.sqrt(L2)

    absX1_sq = 2 * I1 / L1
    absX2_sq = 2 * I2 / L2
    X_to_z1 = T.sqrt(1 - absX1_sq / 4)
    X_to_z2 = T.sqrt(1 - absX2_sq / 4)
    Y_to_zeta1 = 1 / T.sqrt(1 - absX1_sq / 2)
    Y_to_zeta2 = 1 / T.sqrt(1 - absX2_sq / 2)

    a1 = (L1 / beta1)**2
    k1 = Xre1 * X_to_z1
    h1 = Xim1 * X_to_z1
    q1 = Yre1 * Y_to_zeta1
    p1 = Yim1 * Y_to_zeta1
    e1 = T.sqrt(absX1_sq) * X_to_z1
    inc1 = 2 * T.arcsin(T.sqrt(p1 * p1 + q1 * q1))

    a2 = (L2 / beta2)**2
    k2 = Xre2 * X_to_z2
    h2 = Xim2 * X_to_z2
    q2 = Yre2 * Y_to_zeta2
    p2 = Yim2 * Y_to_zeta2
    e2 = T.sqrt(absX2_sq) * X_to_z2
    inc2 = 2 * T.arcsin(T.sqrt(p2 * p2 + q2 * q2))

    beta1p = T.sqrt(Mstar1) * beta1
    beta2p = T.sqrt(Mstar2) * beta2
    Hkep = -0.5 * beta1p / a1 - 0.5 * beta2p / a2

    Hdir, Hind = calc_Hint_components_spatial(
        a1, a2, l1, l2, h1, k1, h2, k2, p1, q1, p2, q2, Mstar1, Mstar2)
    eps = m1 * m2 / (mu1 + mu2) / T.sqrt(mstar)
    Hpert = (Hdir + Hind / mstar)
    Hpert_av = Hpert.dot(quad_weights)
    Htot = Hkep + eps * Hpert_av

    #####################################################
    # Set parameters for compiling functions with Theano
    #####################################################

    # Get numerical quadrature nodes and weights
    nodes, weights = np.polynomial.legendre.leggauss(N_QUAD_PTS)

    # Rescale for integration interval from [-1, 1] to [-pi, pi]
    nodes = nodes * np.pi
    weights = weights * 0.5

    # 'givens' will fix some parameters of Theano functions compiled below
    givens = [(psi, nodes), (quad_weights, weights)]

    # 'ins' will set the inputs of Theano functions compiled below
    # Note: 'extra_ins' will be passed as values of object attributes
    # of the 'ResonanceEquations' class defined below
    extra_ins = [m1, m2, j, k]
    ins = [dyvars] + extra_ins

    Stilde = Phi * (L2 - I2 - L1 + I1) / (Ltot)
    Q1 = 0.5 * (Phi + Stilde)
    Q2 = 0.5 * (Phi - Stilde)
    inc1 = T.arccos(1 - Q1 / (L1 - I1))
    inc2 = T.arccos(1 - Q2 / (L2 - I2))

    orbels = [
        a1, e1, inc1, k * T.arctan2(y1, x1),
        a2, e2, inc2, k * T.arctan2(y2, x2),
        T.arctan2(y_inc, x_inc)
    ]
    orbels_dict = dict(
        zip(['a1', 'e1', 'inc1', 'theta1',
             'a2', 'e2', 'inc2', 'theta2', 'phi'], orbels))

    actions = [L1, L2, I1, I2, Q1, Q2]
    actions_dict = dict(
        zip(['L1', 'L2', 'Gamma1', 'Gamma2', 'Q1', 'Q2'], actions))

    # Conservative flow
    gradHtot = T.grad(Htot, wrt=dyvars)
    gradHpert = T.grad(Hpert_av, wrt=dyvars)
    gradHkep = T.grad(Hkep, wrt=dyvars)

    hessHtot = theano.gradient.hessian(Htot, wrt=dyvars)
    hessHpert = theano.gradient.hessian(Hpert_av, wrt=dyvars)
    hessHkep = theano.gradient.hessian(Hkep, wrt=dyvars)

    Jtens = T.as_tensor(np.pad(getOmegaMatrix(Ndof), (0, Nconst), 'constant'))
    H_flow_vec = Jtens.dot(gradHtot)
    Hpert_flow_vec = Jtens.dot(gradHpert)
    Hkep_flow_vec = Jtens.dot(gradHkep)

    H_flow_jac = Jtens.dot(hessHtot)
    Hpert_flow_jac = Jtens.dot(hessHpert)
    Hkep_flow_jac = Jtens.dot(hessHkep)

    ##########################
    # Compile Theano functions
    ##########################
    func_dict = {
        # Hamiltonians
        'H': Htot,
        # 'Hpert': Hpert_av,
        # 'Hkep': Hkep,
        # Hamiltonian flows
        'H_flow': H_flow_vec,
        # 'Hpert_flow': Hpert_flow_vec,
        # 'Hkep_flow': Hkep_flow_vec,
        # Hamiltonian flow Jacobians
        'H_flow_jac': H_flow_jac,
        # 'Hpert_flow_jac': Hpert_flow_jac,
        # 'Hkep_flow_jac': Hkep_flow_jac,
        # Extras
        'orbital_elements': orbels_dict,
        'actions': actions_dict
    }

    compiled_func_dict = dict()
    with tqdm(func_dict.items()) as t:
        for key, val in t:
            t.set_description("Compiling '{}'".format(key))
            if key == 'timescales':
                inputs = extra_ins
            else:
                inputs = ins
            cf = theano.function(
                inputs=inputs,
                outputs=val,
                givens=givens,
                on_unused_input='ignore'
            )
            compiled_func_dict[key] = cf

    return compiled_func_dict
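The inc = 2*arcsin(sqrt(p^2 + q^2)) conversions above are the standard map from the (p, q) inclination variables back to an inclination angle; a tiny NumPy illustration with invented values:

# Recover an inclination angle from (p, q) inclination variables, as in the
# inc1/inc2 expressions above; the numbers are made up for illustration only.
import numpy as np

p, q = 0.02, 0.035
inc = 2 * np.arcsin(np.sqrt(p * p + q * q))
print(np.degrees(inc))  # ~4.6 degrees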
data = pd.read_csv(io.StringIO(golf_data), sep=" ")

# model-inference
coords = {"distance": data.distance}
fileName = 'golf_geometry_PyMC3'
samples = 2000
chains = 2
tune = 1000

geometry_model = pm.Model(coords=coords)
with geometry_model:
    # to store the n-parameter of the Binomial dist
    # in the constant group of ArviZ InferenceData.
    # You should always call it n for imd to retrieve it
    n = pm.Data('n', data.tries)

    sigma_angle = pm.HalfNormal('sigma_angle')
    p_goes_in = pm.Deterministic(
        'p_goes_in',
        2 * Phi(tt.arcsin((CUP_RADIUS - BALL_RADIUS) / data.distance) / sigma_angle) - 1,
        dims='distance')
    successes = pm.Binomial('successes', n=n, p=p_goes_in,
                            observed=data.successes, dims='distance')

    # inference
    trace_g = pm.sample(draws=samples, chains=chains, tune=tune)
    prior_g = pm.sample_prior_predictive(samples=samples)
    posterior_predictive_g = pm.sample_posterior_predictive(trace_g, samples=samples)

## STEP 1
# will also capture all the sampler statistics
data_g = az.from_pymc3(trace=trace_g, prior=prior_g,
                       posterior_predictive=posterior_predictive_g)

## STEP 2
# dag
dag_g = get_dag(geometry_model)
# insert dag into sampler stat attributes
data_g.sample_stats.attrs["graph"] = str(dag_g)
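The success probability in the geometry model is P(success) = 2*Phi(arcsin((CUP_RADIUS - BALL_RADIUS)/distance)/sigma_angle) - 1; the sketch below evaluates that expression directly with SciPy, assuming the usual golf-putting values (ball diameter 1.68 in, cup diameter 4.25 in, distances in feet) and an invented sigma_angle rather than a fitted one.

# Direct evaluation of the angular success probability used for p_goes_in;
# CUP_RADIUS/BALL_RADIUS assume the standard golf-putting setup (in feet) and
# sigma_angle is an arbitrary example value, not a posterior estimate.
import numpy as np
from scipy.stats import norm

BALL_RADIUS = (1.68 / 2) / 12   # feet
CUP_RADIUS = (4.25 / 2) / 12    # feet
sigma_angle = 0.026             # radians, illustrative

distance = np.array([2.0, 5.0, 10.0, 20.0])  # feet
threshold = np.arcsin((CUP_RADIUS - BALL_RADIUS) / distance)
p_goes_in = 2 * norm.cdf(threshold / sigma_angle) - 1
print(p_goes_in)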
def bound_loss(x, tnp=np):
    eps = 1e-9
    loss = tnp.maximum(tnp.maximum(eps, x - 1), tnp.maximum(eps, -x)) + eps
    return tnp.maximum(loss, eps) + eps


params_plus1 = shared(np.random.rand(nbatch, 2))
params_plus2 = shared(np.random.rand(nbatch, 2))
a = Print("invplus(x, params_plus1")(invplus(x, params_plus1))
b = Print("invplus(x, params_plus2")(invplus(y, params_plus2))

eps = 1e-9
a2 = T.arccos(T.clip(a, eps, 1 - eps))
b2 = T.arcsin(T.clip(b, eps, 1 - eps))

bl1 = bound_loss(a, tnp=T)
bl2 = bound_loss(b, tnp=T)

phi1_group = []
phi2_group = []
phi3_group = []
phi1_group.append(a2[:, 0])
phi1_group.append(b2[:, 0])

delta_group = []
delta_group.append(a2[:, 1])
delta_group.append(b2[:, 1])

delta_single, delta_var = singular(delta_group, name="delta")
params_plus3 = shared(np.random.rand(nbatch, 1))
def run_onetransit_inference(self, prior_d, pklpath, make_threadsafe=True):
    """
    Similar to "run_transit_inference", but with more restrictive priors on
    the ephemeris. Also, it simultaneously fits for a quadratic trend.
    """

    # if the model has already been run, pull the result from the
    # pickle. otherwise, run it.
    if os.path.exists(pklpath):
        d = pickle.load(open(pklpath, 'rb'))
        self.model = d['model']
        self.trace = d['trace']
        self.map_estimate = d['map_estimate']
        return 1

    with pm.Model() as model:

        assert len(self.data.keys()) == 1
        name = list(self.data.keys())[0]
        x_obs = list(self.data.values())[0][0]
        y_obs = list(self.data.values())[0][1]
        y_err = list(self.data.values())[0][2]
        t_exp = list(self.data.values())[0][3]

        # Fixed data errors.
        sigma = y_err

        # Define priors and PyMC3 random variables to sample over.

        # Stellar parameters. (Following tess.world notebooks).
        logg_star = pm.Normal("logg_star", mu=LOGG, sd=LOGG_STDEV)
        r_star = pm.Bound(pm.Normal, lower=0.0)(
            "r_star", mu=RSTAR, sd=RSTAR_STDEV)
        rho_star = pm.Deterministic(
            "rho_star", factor * 10**logg_star / r_star)

        # Transit parameters.
        t0 = pm.Normal("t0", mu=prior_d['t0'], sd=1e-3,
                       testval=prior_d['t0'])
        period = pm.Normal('period', mu=prior_d['period'], sd=3e-4,
                           testval=prior_d['period'])

        # NOTE: might want to implement kwarg for flexibility
        # u = xo.distributions.QuadLimbDark(
        #     "u", testval=prior_d['u']
        # )
        u0 = pm.Uniform('u[0]', lower=prior_d['u[0]'] - 0.15,
                        upper=prior_d['u[0]'] + 0.15,
                        testval=prior_d['u[0]'])
        u1 = pm.Uniform('u[1]', lower=prior_d['u[1]'] - 0.15,
                        upper=prior_d['u[1]'] + 0.15,
                        testval=prior_d['u[1]'])
        u = [u0, u1]

        # # The Espinoza (2018) parameterization for the joint radius ratio
        # # and impact parameter distribution
        # r, b = xo.distributions.get_joint_radius_impact(
        #     min_radius=0.001, max_radius=1.0,
        #     testval_r=prior_d['r'],
        #     testval_b=prior_d['b']
        # )
        # # NOTE: apparently, it's been deprecated. DFM's manuscript notes
        # # that it leads to Rp/Rs values biased high

        log_r = pm.Uniform('log_r', lower=np.log(1e-2), upper=np.log(1),
                           testval=prior_d['log_r'])
        r = pm.Deterministic('r', tt.exp(log_r))

        b = xo.distributions.ImpactParameter("b", ror=r,
                                             testval=prior_d['b'])

        # the transit
        orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b,
                                         rho_star=rho_star)
        transit_lc = pm.Deterministic(
            'transit_lc',
            xo.LimbDarkLightCurve(u).get_light_curve(
                orbit=orbit, r=r, t=x_obs, texp=t_exp).T.flatten())

        # quadratic trend parameters
        mean = pm.Normal(f"{name}_mean", mu=prior_d[f'{name}_mean'],
                         sd=1e-2, testval=prior_d[f'{name}_mean'])
        a1 = pm.Normal(f"{name}_a1", mu=prior_d[f'{name}_a1'], sd=1,
                       testval=prior_d[f'{name}_a1'])
        a2 = pm.Normal(f"{name}_a2", mu=prior_d[f'{name}_a2'], sd=1,
                       testval=prior_d[f'{name}_a2'])

        _tmid = np.nanmedian(x_obs)

        lc_model = pm.Deterministic(
            'mu_transit',
            mean + a1 * (x_obs - _tmid) + a2 * (x_obs - _tmid)**2 + transit_lc)

        roughdepth = pm.Deterministic(
            'roughdepth', pm.math.abs_(transit_lc).max())

        #
        # Derived parameters
        #

        # planet radius in jupiter radii
        r_planet = pm.Deterministic(
            "r_planet",
            (r * r_star) * (1 * units.Rsun / (1 * units.Rjup)).cgs.value)

        #
        # eq 30 of winn+2010, ignoring planet density.
        #
        a_Rs = pm.Deterministic(
            "a_Rs",
            (rho_star * period**2)**(1 / 3) *
            (((1 * units.gram / (1 * units.cm)**3) * (1 * units.day**2) *
              const.G / (3 * np.pi))**(1 / 3)).cgs.value)

        #
        # cosi. assumes e=0 (e.g., Winn+2010 eq 7)
        #
        cosi = pm.Deterministic("cosi", b / a_Rs)

        # safer than tt.arccos(cosi)
        sini = pm.Deterministic("sini", pm.math.sqrt(1 - cosi**2))

        #
        # transit durations (T_14, T_13) for circular orbits.
        # Winn+2010 Eq 14, 15. units: hours.
        #
        T_14 = pm.Deterministic(
            'T_14',
            (period / np.pi) * tt.arcsin(
                (1 / a_Rs) * pm.math.sqrt((1 + r)**2 - b**2) * (1 / sini)
            ) * 24)

        T_13 = pm.Deterministic(
            'T_13',
            (period / np.pi) * tt.arcsin(
                (1 / a_Rs) * pm.math.sqrt((1 - r)**2 - b**2) * (1 / sini)
            ) * 24)

        #
        # mean model and likelihood
        #
        # mean_model = mu_transit + mean
        # mu_model = pm.Deterministic('mu_model', lc_model)

        likelihood = pm.Normal('obs', mu=lc_model, sigma=sigma,
                               observed=y_obs)

        # Optimizing
        map_estimate = pm.find_MAP(model=model)

        # start = model.test_point
        # if 'transit' in self.modelcomponents:
        #     map_estimate = xo.optimize(start=start,
        #                                vars=[r, b, period, t0])
        # map_estimate = xo.optimize(start=map_estimate)

        if make_threadsafe:
            pass
        else:
            # as described in
            # https://github.com/matplotlib/matplotlib/issues/15410
            # matplotlib is not threadsafe, so do not make plots before
            # sampling, because some child process tries to close a
            # cached file and crashes the sampler.
            print(map_estimate)

        # sample from the posterior defined by this model.
        trace = pm.sample(
            tune=self.N_samples, draws=self.N_samples,
            start=map_estimate, cores=self.N_cores,
            chains=self.N_chains,
            step=xo.get_dense_nuts_step(target_accept=0.8),
        )

    with open(pklpath, 'wb') as buff:
        pickle.dump(
            {'model': model, 'trace': trace,
             'map_estimate': map_estimate}, buff)

    self.model = model
    self.trace = trace
    self.map_estimate = map_estimate
def run_alltransit_inference(self, prior_d, pklpath, make_threadsafe=True):

    # if the model has already been run, pull the result from the
    # pickle. otherwise, run it.
    if os.path.exists(pklpath):
        d = pickle.load(open(pklpath, 'rb'))
        self.model = d['model']
        self.trace = d['trace']
        self.map_estimate = d['map_estimate']
        return 1

    with pm.Model() as model:

        # Shared parameters

        # Stellar parameters. (Following tess.world notebooks).
        logg_star = pm.Normal("logg_star", mu=LOGG, sd=LOGG_STDEV)
        r_star = pm.Bound(pm.Normal, lower=0.0)(
            "r_star", mu=RSTAR, sd=RSTAR_STDEV)
        rho_star = pm.Deterministic(
            "rho_star", factor * 10**logg_star / r_star)

        # fix Rp/Rs across bandpasses, b/c you're assuming it's a planet
        if 'quaddepthvar' not in self.modelid:
            log_r = pm.Uniform('log_r', lower=np.log(1e-2),
                               upper=np.log(1), testval=prior_d['log_r'])
            r = pm.Deterministic('r', tt.exp(log_r))
        else:
            log_r_Tband = pm.Uniform('log_r_Tband', lower=np.log(1e-2),
                                     upper=np.log(1),
                                     testval=prior_d['log_r_Tband'])
            r_Tband = pm.Deterministic('r_Tband', tt.exp(log_r_Tband))

            log_r_Rband = pm.Uniform('log_r_Rband', lower=np.log(1e-2),
                                     upper=np.log(1),
                                     testval=prior_d['log_r_Rband'])
            r_Rband = pm.Deterministic('r_Rband', tt.exp(log_r_Rband))

            log_r_Bband = pm.Uniform('log_r_Bband', lower=np.log(1e-2),
                                     upper=np.log(1),
                                     testval=prior_d['log_r_Bband'])
            r_Bband = pm.Deterministic('r_Bband', tt.exp(log_r_Bband))

            r = r_Tband

        # Some orbital parameters
        t0 = pm.Normal("t0", mu=prior_d['t0'], sd=5e-3,
                       testval=prior_d['t0'])
        period = pm.Normal('period', mu=prior_d['period'], sd=5e-3,
                           testval=prior_d['period'])
        b = xo.distributions.ImpactParameter("b", ror=r,
                                             testval=prior_d['b'])
        orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b,
                                         rho_star=rho_star)

        # NOTE: limb-darkening should be bandpass specific, but we don't
        # have the SNR to justify that, so go with TESS-dominated
        u0 = pm.Uniform('u[0]', lower=prior_d['u[0]'] - 0.15,
                        upper=prior_d['u[0]'] + 0.15,
                        testval=prior_d['u[0]'])
        u1 = pm.Uniform('u[1]', lower=prior_d['u[1]'] - 0.15,
                        upper=prior_d['u[1]'] + 0.15,
                        testval=prior_d['u[1]'])
        u = [u0, u1]

        star = xo.LimbDarkLightCurve(u)

        # Loop over "instruments" (TESS, then each ground-based lightcurve)
        parameters = dict()
        lc_models = dict()
        roughdepths = dict()

        for n, (name, (x, y, yerr, texp)) in enumerate(self.data.items()):

            # Define per-instrument parameters in a submodel, to not need
            # to prefix the names. Yields e.g., "TESS_mean",
            # "elsauce_0_mean", "elsauce_2_a2"
            with pm.Model(name=name, model=model):

                # Transit parameters.
                mean = pm.Normal("mean", mu=prior_d[f'{name}_mean'],
                                 sd=1e-2, testval=prior_d[f'{name}_mean'])

                if 'quad' in self.modelid:
                    if name != 'tess':
                        # units: rel flux per day.
                        a1 = pm.Normal("a1", mu=prior_d[f'{name}_a1'], sd=1,
                                       testval=prior_d[f'{name}_a1'])
                        # units: rel flux per day^2.
                        a2 = pm.Normal("a2", mu=prior_d[f'{name}_a2'], sd=1,
                                       testval=prior_d[f'{name}_a2'])

            if self.modelid == 'alltransit':
                lc_models[name] = pm.Deterministic(
                    f'{name}_mu_transit',
                    mean + star.get_light_curve(
                        orbit=orbit, r=r, t=x, texp=texp).T.flatten())

            elif self.modelid == 'alltransit_quad':
                if name != 'tess':
                    # midpoint for this definition of the quadratic trend
                    _tmid = np.nanmedian(x)
                    lc_models[name] = pm.Deterministic(
                        f'{name}_mu_transit',
                        mean + a1 * (x - _tmid) + a2 * (x - _tmid)**2 +
                        star.get_light_curve(
                            orbit=orbit, r=r, t=x, texp=texp).T.flatten())
                elif name == 'tess':
                    lc_models[name] = pm.Deterministic(
                        f'{name}_mu_transit',
                        mean + star.get_light_curve(
                            orbit=orbit, r=r, t=x, texp=texp).T.flatten())

            elif self.modelid == 'alltransit_quaddepthvar':
                if name != 'tess':
                    # midpoint for this definition of the quadratic trend
                    _tmid = np.nanmedian(x)

                    # do custom depth-to-
                    if (name == 'elsauce_20200401' or
                            name == 'elsauce_20200426'):
                        r = r_Rband
                    elif name == 'elsauce_20200521':
                        r = r_Tband
                    elif name == 'elsauce_20200614':
                        r = r_Bband

                    transit_lc = star.get_light_curve(
                        orbit=orbit, r=r, t=x, texp=texp).T.flatten()

                    lc_models[name] = pm.Deterministic(
                        f'{name}_mu_transit',
                        mean + a1 * (x - _tmid) + a2 * (x - _tmid)**2 +
                        transit_lc)

                    roughdepths[name] = pm.Deterministic(
                        f'{name}_roughdepth',
                        pm.math.abs_(transit_lc).max())

                elif name == 'tess':
                    r = r_Tband

                    transit_lc = star.get_light_curve(
                        orbit=orbit, r=r, t=x, texp=texp).T.flatten()

                    lc_models[name] = pm.Deterministic(
                        f'{name}_mu_transit', mean + transit_lc)

                    roughdepths[name] = pm.Deterministic(
                        f'{name}_roughdepth',
                        pm.math.abs_(transit_lc).max())

            # TODO: add error bar fudge
            likelihood = pm.Normal(f'{name}_obs', mu=lc_models[name],
                                   sigma=yerr, observed=y)

        #
        # Derived parameters
        #
        if self.modelid == 'alltransit_quaddepthvar':
            r = r_Tband

        # planet radius in jupiter radii
        r_planet = pm.Deterministic(
            "r_planet",
            (r * r_star) * (1 * units.Rsun / (1 * units.Rjup)).cgs.value)

        #
        # eq 30 of winn+2010, ignoring planet density.
        #
        a_Rs = pm.Deterministic(
            "a_Rs",
            (rho_star * period**2)**(1 / 3) *
            (((1 * units.gram / (1 * units.cm)**3) * (1 * units.day**2) *
              const.G / (3 * np.pi))**(1 / 3)).cgs.value)

        #
        # cosi. assumes e=0 (e.g., Winn+2010 eq 7)
        #
        cosi = pm.Deterministic("cosi", b / a_Rs)

        # probably safer than tt.arccos(cosi)
        sini = pm.Deterministic("sini", pm.math.sqrt(1 - cosi**2))

        #
        # transit durations (T_14, T_13) for circular orbits.
        # Winn+2010 Eq 14, 15. units: hours.
        #
        T_14 = pm.Deterministic(
            'T_14',
            (period / np.pi) * tt.arcsin(
                (1 / a_Rs) * pm.math.sqrt((1 + r)**2 - b**2) * (1 / sini)
            ) * 24)

        T_13 = pm.Deterministic(
            'T_13',
            (period / np.pi) * tt.arcsin(
                (1 / a_Rs) * pm.math.sqrt((1 - r)**2 - b**2) * (1 / sini)
            ) * 24)

        # Optimizing
        map_estimate = pm.find_MAP(model=model)

        # start = model.test_point
        # if 'transit' in self.modelcomponents:
        #     map_estimate = xo.optimize(start=start,
        #                                vars=[r, b, period, t0])
        # map_estimate = xo.optimize(start=map_estimate)

        if make_threadsafe:
            pass
        else:
            # NOTE: would usually plot MAP estimate here, but really
            # there's not a huge need.
            print(map_estimate)
            pass

        # sample from the posterior defined by this model.
        trace = pm.sample(
            tune=self.N_samples, draws=self.N_samples,
            start=map_estimate, cores=self.N_cores,
            chains=self.N_chains,
            step=xo.get_dense_nuts_step(target_accept=0.8),
        )

    with open(pklpath, 'wb') as buff:
        pickle.dump(
            {'model': model, 'trace': trace,
             'map_estimate': map_estimate}, buff)

    self.model = model
    self.trace = trace
    self.map_estimate = map_estimate
def run_allindivtransit_inference(self, prior_d, pklpath,
                                  make_threadsafe=True, target_accept=0.8):

    # if the model has already been run, pull the result from the
    # pickle. otherwise, run it.
    if os.path.exists(pklpath):
        d = pickle.load(open(pklpath, 'rb'))
        self.model = d['model']
        self.trace = d['trace']
        self.map_estimate = d['map_estimate']
        return 1

    with pm.Model() as model:

        # Shared parameters

        # Stellar parameters. (Following tess.world notebooks).
        logg_star = pm.Normal("logg_star", mu=LOGG, sd=LOGG_STDEV)
        r_star = pm.Bound(pm.Normal, lower=0.0)(
            "r_star", mu=RSTAR, sd=RSTAR_STDEV)
        rho_star = pm.Deterministic(
            "rho_star", factor * 10**logg_star / r_star)

        # fix Rp/Rs across bandpasses, b/c you're assuming it's a planet
        log_r = pm.Uniform('log_r', lower=np.log(1e-2), upper=np.log(1),
                           testval=prior_d['log_r'])
        r = pm.Deterministic('r', tt.exp(log_r))

        # Some orbital parameters
        t0 = pm.Normal("t0", mu=prior_d['t0'], sd=1e-1,
                       testval=prior_d['t0'])
        period = pm.Normal('period', mu=prior_d['period'], sd=1e-1,
                           testval=prior_d['period'])
        b = xo.distributions.ImpactParameter("b", ror=r,
                                             testval=prior_d['b'])
        orbit = xo.orbits.KeplerianOrbit(period=period, t0=t0, b=b,
                                         rho_star=rho_star)

        # NOTE: limb-darkening should be bandpass specific, but we don't
        # have the SNR to justify that, so go with TESS-dominated
        # u = xo.QuadLimbDark("u")  # NOTE: deprecated(?)
        delta_u = 0.15
        u0 = pm.Uniform('u[0]', lower=prior_d['u[0]'] - delta_u,
                        upper=prior_d['u[0]'] + delta_u,
                        testval=prior_d['u[0]'])
        u1 = pm.Uniform('u[1]', lower=prior_d['u[1]'] - delta_u,
                        upper=prior_d['u[1]'] + delta_u,
                        testval=prior_d['u[1]'])
        u = [u0, u1]

        star = xo.LimbDarkLightCurve(u)

        # Loop over "instruments" (TESS, then each ground-based lightcurve)
        parameters = dict()
        lc_models = dict()
        roughdepths = dict()

        for n, (name, (x, y, yerr, texp)) in enumerate(self.data.items()):

            if 'tess' in name:
                delta_trend = 0.100
            else:
                delta_trend = 0.050

            # Define per-instrument parameters in a submodel, to not need
            # to prefix the names. Yields e.g., "TESS_0_mean",
            # "elsauce_0_mean", "elsauce_2_a2"
            mean = pm.Normal(f'{name}_mean', mu=prior_d[f'{name}_mean'],
                             sd=1e-2, testval=prior_d[f'{name}_mean'])
            a1 = pm.Uniform(f'{name}_a1', lower=-delta_trend,
                            upper=delta_trend,
                            testval=prior_d[f'{name}_a1'])
            a2 = pm.Uniform(f'{name}_a2', lower=-delta_trend,
                            upper=delta_trend,
                            testval=prior_d[f'{name}_a2'])

            # midpoint for this definition of the quadratic trend
            _tmid = np.nanmedian(x)

            transit_lc = star.get_light_curve(
                orbit=orbit, r=r, t=x, texp=texp).T.flatten()

            lc_models[name] = pm.Deterministic(
                f'{name}_mu_transit',
                mean + a1 * (x - _tmid) + a2 * (x - _tmid)**2 + transit_lc)

            roughdepths[name] = pm.Deterministic(
                f'{name}_roughdepth', pm.math.abs_(transit_lc).max())

            # NOTE: might want error bar fudge.
            likelihood = pm.Normal(f'{name}_obs', mu=lc_models[name],
                                   sigma=yerr, observed=y)

        #
        # Derived parameters
        #

        # planet radius in jupiter radii
        r_planet = pm.Deterministic(
            "r_planet",
            (r * r_star) * (1 * units.Rsun / (1 * units.Rjup)).cgs.value)

        #
        # eq 30 of winn+2010, ignoring planet density.
        #
        a_Rs = pm.Deterministic(
            "a_Rs",
            (rho_star * period**2)**(1 / 3) *
            (((1 * units.gram / (1 * units.cm)**3) * (1 * units.day**2) *
              const.G / (3 * np.pi))**(1 / 3)).cgs.value)

        #
        # cosi. assumes e=0 (e.g., Winn+2010 eq 7)
        #
        cosi = pm.Deterministic("cosi", b / a_Rs)

        # probably safer than tt.arccos(cosi)
        sini = pm.Deterministic("sini", pm.math.sqrt(1 - cosi**2))

        #
        # transit durations (T_14, T_13) for circular orbits.
        # Winn+2010 Eq 14, 15. units: hours.
        #
        T_14 = pm.Deterministic(
            'T_14',
            (period / np.pi) * tt.arcsin(
                (1 / a_Rs) * pm.math.sqrt((1 + r)**2 - b**2) * (1 / sini)
            ) * 24)

        T_13 = pm.Deterministic(
            'T_13',
            (period / np.pi) * tt.arcsin(
                (1 / a_Rs) * pm.math.sqrt((1 - r)**2 - b**2) * (1 / sini)
            ) * 24)

        map_estimate = pm.find_MAP(model=model)

        # if make_threadsafe:
        #     pass
        # else:
        #     # NOTE: would usually plot MAP estimate here, but really
        #     # there's not a huge need.
        #     print(map_estimate)
        #     for k, v in map_estimate.items():
        #         if 'transit' not in k:
        #             print(k, v)

        # NOTE: could start at map_estimate, which currently is not being
        # used for anything.
        start = model.test_point

        trace = pm.sample(
            tune=self.N_samples, draws=self.N_samples, start=start,
            cores=self.N_cores, chains=self.N_chains,
            step=xo.get_dense_nuts_step(target_accept=target_accept),
        )

    with open(pklpath, 'wb') as buff:
        pickle.dump(
            {'model': model, 'trace': trace,
             'map_estimate': map_estimate}, buff)

    self.model = model
    self.trace = trace
    self.map_estimate = map_estimate
def asin(x):
    return T.arcsin(x)
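A minimal usage sketch for this thin wrapper, assuming Theano is installed: compile the symbolic arcsin into a callable and evaluate it on a small array.

# Compile the element-wise arcsin into a callable; values are illustrative.
import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
arcsin_fn = theano.function([x], T.arcsin(x))
print(arcsin_fn(np.array([0.0, 0.5, 1.0])))  # [0, pi/6, pi/2]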
def _compile_model(self):
    """ theano implementation of 3C model """

    ### GC90 atmospheric model implementation
    theta_sun, beta, alpha, am, rh, pressure = T.scalars(
        'theta_sun', 'beta', 'alpha', 'am', 'rh', 'pressure')
    wl = T.vector('wl')

    wl_a = 550
    theta_sun_ = theta_sun * np.pi / 180.

    z3 = -0.1417 * alpha + 0.82
    z2 = ifelse(T.gt(alpha, 1.2), 0.65, z3)
    z1 = ifelse(T.lt(alpha, 0), 0.82, z2)

    theta_sun_mean = z1
    B3 = T.log(1 - theta_sun_mean)
    B2 = B3 * (0.0783 + B3 * (-0.3824 - 0.5874 * B3))
    B1 = B3 * (1.459 + B3 * (0.1595 + 0.4129 * B3))
    Fa = 1 - 0.5 * T.exp((B1 + B2 * T.cos(theta_sun_)) * T.cos(theta_sun_))

    omega_a = (-0.0032 * am + 0.972) * T.exp(3.06 * 1e-4 * rh)
    tau_a = beta * (wl / wl_a)**(-alpha)

    # fixed a bug in M, thanks Jaime! [brackets added]
    M = 1 / (T.cos(theta_sun_) + 0.50572 * (90 + 6.07995 - theta_sun)**(-1.6364))
    M_ = M * pressure / 1013.25

    Tr = T.exp(-M_ / (115.6406 * (wl / 1000)**4 - 1.335 * (wl / 1000)**2))
    Tas = T.exp(-omega_a * tau_a * M)

    Edd = Tr * Tas
    Edsr = 0.5 * (1 - Tr**0.95)
    Edsa = Tr**1.5 * (1 - Tas) * Fa

    Ed = Edd + Edsr + Edsa
    Edd_Ed = Edd / Ed
    Edsr_Ed = Edsr / Ed
    Edsa_Ed = Edsa / Ed
    Eds_Ed = Edsr_Ed + Edsa_Ed

    ### Albert and Mobley bio-optical model implementation
    a_w, daw_dT, astar_ph, astar_y, Ls_Ed = T.vectors(
        'a_w', 'daw_dT', 'astar_ph', 'astar_y', 'Ls_Ed')
    (C_chl, C_sm, C_mie, n_mie, C_y, S_y, T_w, theta_view,
     n_w, rho_s, rho_dd, rho_ds, delta) = T.scalars(
        'C_chl', 'C_sm', 'C_mie', 'n_mie', 'C_y', 'S_y', 'T_w',
        'theta_view', 'n_w', 'rho_s', 'rho_dd', 'rho_ds', 'delta')

    # calc_a_ph
    a_ph = C_chl * astar_ph

    # calc_a_y
    wl_ref_y = 440
    a_y = ifelse(T.eq(S_y, -1),
                 C_y * astar_y,
                 C_y * T.exp(-S_y * (wl - wl_ref_y)))

    # calc_a
    T_w_ref = 20.
    a_w_corr = a_w + (T_w - T_w_ref) * daw_dT
    a = a_w_corr + a_ph + a_y

    # calc_bb_sm
    bbstar_sm = 0.0086
    bbstar_mie = 0.0042
    wl_ref_mie = 500
    bb_sm = C_sm * bbstar_sm + C_mie * bbstar_mie * (wl / wl_ref_mie)**n_mie

    # calc_bb
    b1 = ifelse(T.eq(n_w, 1.34), 0.00144, 0.00111)
    wl_ref_water = 500
    S_water = -4.32
    bb_water = b1 * (wl / wl_ref_water)**S_water
    bb = bb_water + bb_sm

    # calc omega_b
    omega_b = bb / (bb + a)

    # calc sun and viewing zenith angles under water
    theta_sun_ = theta_sun * np.pi / 180.
    theta_sun_ss = T.arcsin(T.sin(theta_sun_) / n_w)
    theta_view_ = theta_view * np.pi / 180.
    theta_view_ss = T.arcsin(T.sin(theta_view_) / n_w)

    p_f = [0.1034, 1, 3.3586, -6.5358, 4.6638, 2.4121]
    p_frs = [0.0512, 1, 4.6659, -7.8387, 5.4571, 0.1098, 0.4021]

    # calc subsurface reflectance
    f = p_f[0] * (p_f[1] + p_f[2] * omega_b + p_f[3] * omega_b**2 +
                  p_f[4] * omega_b**3) * (1 + p_f[5] / T.cos(theta_sun_ss))
    R0minus = f * omega_b

    # calc subsurface remote sensing reflectance
    frs = p_frs[0] * (p_frs[1] + p_frs[2] * omega_b + p_frs[3] * omega_b**2 +
                      p_frs[4] * omega_b**3) * (
        1 + p_frs[5] / T.cos(theta_sun_ss)) * (
        1 + p_frs[6] / T.cos(theta_view_ss))
    Rrs0minus = frs * omega_b

    # calc water surface reflected reflectance
    Rrs_refl = (rho_s * Ls_Ed + rho_dd * Edd_Ed / np.pi +
                rho_ds * Eds_Ed / np.pi + delta)

    # calc_Rrs0plus (Lee1998, eq22), R=Q*Rrs
    gamma = 0.48
    zeta = 0.518
    Rrs = zeta * Rrs0minus / (1 - gamma * R0minus)

    Lu_Ed = Rrs + Rrs_refl

    f = th.function(
        [beta, alpha, am, rh, pressure, C_chl, C_sm, C_mie, n_mie, C_y,
         S_y, T_w, theta_sun, theta_view, n_w, rho_s, rho_dd, rho_ds,
         delta, wl, a_w, daw_dT, astar_ph, astar_y, Ls_Ed],
        [Rrs, Rrs_refl, Lu_Ed, Ed],
        on_unused_input='warn')

    return f
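The underwater zenith angles above are just Snell's law, theta_underwater = arcsin(sin(theta)/n_w); the short NumPy illustration below uses an assumed refractive index of 1.34 for water and an arbitrary sun zenith angle.

# Snell's-law refraction of the sun zenith angle, as used for theta_sun_ss /
# theta_view_ss above; n_w = 1.34 and the 40-degree angle are example values.
import numpy as np

theta_sun_deg = 40.0
n_w = 1.34
theta_sun_underwater = np.arcsin(np.sin(np.radians(theta_sun_deg)) / n_w)
print(np.degrees(theta_sun_underwater))  # ~28.7 degrees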
# return T.mean(s, axis=0), variance  # T.maximum(T.var(s, axis=0), eps)  # Should eps be here?!

def bound_loss(x, tnp=np):
    eps = 1e-9
    loss = tnp.maximum(tnp.maximum(eps, x - 1), tnp.maximum(eps, -x)) + eps
    return tnp.maximum(loss, eps) + eps


params_plus1 = shared(np.random.rand(nbatch, 2))
params_plus2 = shared(np.random.rand(nbatch, 2))
a = Print('invplus(x, params_plus1')(invplus(x, params_plus1))
b = Print('invplus(x, params_plus2')(invplus(y, params_plus2))

eps = 1e-9
a2 = T.arccos(T.clip(a, eps, 1))
b2 = T.arcsin(T.clip(b, eps, 1))

bl1 = bound_loss(a, tnp=T)
bl2 = bound_loss(b, tnp=T)

theta1_group = []
theta2_group = []
theta3_group = []
theta1_group.append(a2[:, 0])
theta1_group.append(b2[:, 0])

delta_group = []
delta_group.append(a2[:, 1])
delta_group.append(b2[:, 1])

delta_single, delta_var = singular(delta_group, name='delta')
params_plus3 = shared(np.random.rand(nbatch, 1))