Example #1
# use None if not desired
texout = None
#texout = './MCMC_fit_final_thin.tex'
# whether or not to evaluate all the isochrones to get inferred properties
# in the TeX file (adds a lot of time)
inferredparams = True

# iteration where burn-in stops
burnin = 20000
# make the triangle plot
maketriangle = True

# ========================================================================== #

if fitlimb:
    labels.append('$u_{S1,1}$')
    labels.append('$u_{S1,2}$')

nparams = len(labels)

x = np.loadtxt(infile)
print('File loaded')

# split the metadata from the chain results
iteration = x[:, 0]
walkers = x[:, 1]
uwalkers = np.unique(walkers)
loglike = x[:, 2]
x = x[:, 3:]

# thin the file if we want to speed things up
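
# The snippet is truncated at the thinning step. A minimal sketch of what it
# might look like, assuming a hypothetical `thin` factor; the metadata columns
# are sliced in lockstep so they stay aligned with the chain samples:
thin = 10  # hypothetical: keep every 10th row
if thin > 1:
    iteration = iteration[::thin]
    walkers = walkers[::thin]
    loglike = loglike[::thin]
    x = x[::thin, :]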
Example #2
# use None if not desired
texout = None

# whether or not to evaluate all the isochrones to get inferred properties
# in the TeX file (adds a lot of time)
inferredparams = False

# iteration where burn-in stops
burnin = 20000
# make the triangle plot
maketriangle = True

# ========================================================================== #

if fitlimb:
    labels.append('$u_{S1,1}$')
    labels.append('$u_{S1,2}$')

nparams = len(labels)

x = np.loadtxt(infile_Brewer)
print('File loaded')

# split the metadata from the chain results
iteration = x[:, 0]
walkers = x[:, 1]
uwalkers = np.unique(walkers)
loglike = x[:, 2]
x = x[:, 3:]

# thin the file if we want to speed things up
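
# The thinning code is truncated here. Downstream, the `burnin` setting from
# the top of the snippet would typically be applied as a mask; a hedged sketch
# (the mask and the variable names past this point are assumptions):
pastburn = iteration > burnin
xpast = x[pastburn, :]
loglikepast = loglike[pastburn]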
Example #3
# p_start from spc median light curve fit (per, ttran, ecosw, esinw, b, R2, m1, feh, age, f2f1)
# and RV SPC median fit (gamma, gamma_os, jitter1, jitter2)
p_start = [
    8.81805033e+01, 8.54191297e+01, 1.47166426e-02, -1.22324907e-02,
    6.72741838e-01, 1.249e-02, 9.64221684e-01, 2.29024719e-01, 2.55001938e+00,
    1.97466051e+01, -2.73888308e+01, -4.36388788e+01, 3.50609998e-02,
    1.05965734e-01, 0.001127
]

p = p_start

# add limb darkening parameters if we want to try to fit for them
if fitlimb:
    p = np.concatenate((p, np.array([5.64392567e-02, 5.07460729e-01])))
    labels.append('$u_{S1,1}$')
    labels.append('$u_{S1,2}$')

# set up the crowding parameters for each event
crowding = np.ones(len(equarts))
if usecrowd:
    for ii in np.arange(len(crowding)):
        crowding[ii] = quartcontam[equarts[ii] - 1]

# just define segments of data as any data gap more than 4 days
edges = np.where(np.abs(np.diff(t)) > 4.)[0] + 1
cuts = np.zeros(len(t), dtype=int)
# increment the start of a new segment by 1
cuts[edges] = 1
cuts = np.cumsum(cuts)
ncuts = cuts[-1] + 1
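
# A toy illustration of the segmentation above (not from the source):
# np.diff finds the gaps, each segment start gets a 1, and the cumulative sum
# turns those flags into a per-point segment label.
t_demo = np.array([0., 1., 2., 10., 11., 30.])
edges_demo = np.where(np.abs(np.diff(t_demo)) > 4.)[0] + 1  # -> [3, 5]
cuts_demo = np.zeros(len(t_demo), dtype=int)
cuts_demo[edges_demo] = 1
cuts_demo = np.cumsum(cuts_demo)  # -> [0, 0, 0, 1, 1, 2]
ncuts_demo = cuts_demo[-1] + 1    # -> 3 segments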
Example #4
# use None if not desired
texout = None

# whether or not to evaluate all the isochrones to get inferred properties
# in the TeX file (adds a lot of time)
inferredparams = False

# iteration where burn-in stops
burnin = 20000
# make the triangle plot
maketriangle = True

# ========================================================================== #

if fitlimb:
    labels.append('$u_{S1,1}$')
    labels.append('$u_{S1,2}$')

nparams = len(labels)

x = np.loadtxt(infile_Brewer)
print('File loaded')

# split the metadata from the chain results
iteration = x[:, 0]
walkers = x[:, 1]
uwalkers = np.unique(walkers)
loglike = x[:, 2]
x = x[:, 3:]

# thin the file if we want to speed things up
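
# The thinning code is truncated here. Elsewhere the `maketriangle` flag would
# be acted on; a hedged sketch using the corner package (the package choice
# and the burn-in mask are assumptions, not shown in the snippet):
import corner
import matplotlib.pyplot as plt

if maketriangle:
    pastburn = iteration > burnin
    fig = corner.corner(x[pastburn, :], labels=labels)
    plt.show()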
Example #5
#              1.13214456e+02, 9.97091028e-01])
# median solution from the MCMC analysis (with crowding)
#p = np.array([8.81805180e+01, 8.54189900e+01, 1.47132293e-02, 4.83767012e-04,
#              7.05595086e-01, 2.40081224e+00, 6.33573877e-01, 1.04177206e+00,
#              3.94625983e-01, 1.62016796e+00, 8.08342999e+02, 2.46057348e-02,
#              1.17068978e+02, 1.00122149e+00])
# absolute minimum chi-square found in MCMC (with crowding)
p = np.array([8.81805979e+01, 8.54189422e+01, 1.47105950e-02, 5.83059972e-03,
              7.02722610e-01, 2.35546161e+00, 6.26868773e-01, 1.03255051e+00,
              3.46963869e-01, 1.71307399e+00, 7.99324162e+02, 1.51296591e-02,
              1.23274350e+02, 1.00831069e+00])

# add limb darkening parameters if we want to try to fit for them
if fitlimb:
    p = np.concatenate((p, np.array([5.64392567e-02, 5.07460729e-01])))
    labels.append('$u_{S1,1}$')
    labels.append('$u_{S1,2}$')

# set up the crowding parameters for each event
crowding = np.ones(len(equarts))
if usecrowd:
    for ii in np.arange(len(crowding)):
        crowding[ii] = quartcontam[equarts[ii] - 1]

# just define segments of data as any data gap more than 4 days
edges = np.where(np.abs(np.diff(t)) > 4.)[0] + 1
cuts = np.zeros(len(t), dtype=int)
# increment the start of a new segment by 1
cuts[edges] = 1
cuts = np.cumsum(cuts)
ncuts = cuts[-1] + 1
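
# Side note on the crowding loop above: NumPy fancy indexing performs the same
# per-event lookup in one vectorized step (a stylistic sketch, equivalent
# behavior to the loop):
if usecrowd:
    crowding = np.asarray(quartcontam, dtype=float)[np.asarray(equarts) - 1]
else:
    crowding = np.ones(len(equarts))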