def main(db_file, pool, overwrite=False):
    # HACK: hard-coded relative path to the database
    # NOTE: `db_file` is currently unused; the path below takes precedence
    base_path = '../data/'
    db_path = path.join(base_path, 'db.sqlite')
    engine = db_connect(db_path)
    session = Session()

    # HACK: local import to avoid a module-level dependency
    from astropy.table import Table
    tbl = Table.read('../paper/figures/group_llr_dv_tbl.ecsv',
                     format='ascii.ecsv')

    worker = Worker(db_path=db_path,
                    samples_path=path.abspath('../data/isochrone_samples'),
                    overwrite=overwrite)

    # A little bit of a hack: select only the comoving pairs
    comoving = tbl['R_RV'] > tbl['R_mu']

    tasks = session.query(Observation.id)\
                   .filter(Observation.group_id.in_(tbl['group_id'][comoving]))\
                   .group_by(Observation.group_id).all()
    tasks = tasks[:1]  # HACK: only processes the first task (debugging leftover?)
    session.close()

    for r in pool.map(worker, tasks, callback=worker.callback):
        pass

    pool.close()
    sys.exit(0)
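
# A minimal sketch of how the `main` above might be wired up, since the driver
# code isn't shown here. The argparse flags and `choose_pool` call are
# assumptions: schwimmbad.choose_pool selects an MPI, multiprocessing, or
# serial pool at runtime, matching the `pool.map(...)` usage above.
if __name__ == '__main__':
    from argparse import ArgumentParser
    import schwimmbad

    parser = ArgumentParser()
    parser.add_argument('--db', dest='db_file', default=None)  # hypothetical flag
    parser.add_argument('--mpi', action='store_true', default=False)
    parser.add_argument('--overwrite', action='store_true', default=False)
    args = parser.parse_args()

    pool = schwimmbad.choose_pool(mpi=args.mpi)
    main(args.db_file, pool, overwrite=args.overwrite)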
def main():
    # TODO: bad, hard-coded...
    # base_path = '/Volumes/ProjectData/gaia-comoving-followup/'
    base_path = '../data/'
    db_path = path.join(base_path, 'db.sqlite')
    engine = db_connect(db_path)
    session = Session()

    credentials = dict(user='******', password='******')
    Gaia.login(**credentials)

    for obs in session.query(Observation).all():
        q = session.query(Photometry).join(Observation)\
                   .filter(Observation.id == obs.id).count()
        if q > 0:
            logger.debug('Photometry already exists')
            continue

        if obs.tgas_source is None:
            continue

        tgas_source_id = obs.tgas_source.source_id
        res = get_photometry(tgas_source_id)

        phot_kw = dict()
        for col in result_columns:  # assumed module-level list of column names
            phot_kw[col] = res[col]

        phot = Photometry(**phot_kw)
        phot.observation = obs
        session.add(phot)
        session.commit()
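
# `get_photometry` and `result_columns` are defined elsewhere in this module.
# A minimal sketch of what `get_photometry` might look like, reusing the Gaia
# TAP pattern (`Gaia.launch_job` / `job.get_results()`) that appears in the
# database-loading script below; the ADQL query is an illustrative assumption:
def get_photometry(tgas_source_id):
    # hypothetical ADQL: 2MASS photometry cross-matched to this TGAS source
    adql = """SELECT tm.j_m, tm.h_m, tm.ks_m
              FROM gaiadr1.tmass_best_neighbour AS xm
              JOIN gaiadr1.tmass_original_valid AS tm
                  ON xm.tmass_oid = tm.tmass_oid
              WHERE xm.source_id = {0}""".format(tgas_source_id)
    job = Gaia.launch_job(adql, dump_to_file=False)
    return job.get_results()[0]  # a single row of photometry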
def work(self, id):
    engine = db_connect(self.db_path)
    session = Session()

    obs = session.query(Observation).filter(Observation.id == id).one()
    model = obs_to_starmodel(obs)

    # initial conditions for emcee walkers
    p0 = []
    m0, age0, feh0 = model.ic.random_points(self.nwalkers,
                                            minmass=0.01, maxmass=10.,
                                            minfeh=-1, maxfeh=1)
    _, max_distance = model.bounds('distance')
    _, max_AV = model.bounds('AV')
    d0 = 10**(np.random.uniform(0, np.log10(max_distance),
                                size=self.nwalkers))
    AV0 = np.random.uniform(0, max_AV, size=self.nwalkers)

    p0 += [m0]
    p0 += [age0, feh0, d0, AV0]
    p0 = np.array(p0).T
    npars = p0.shape[1]

    logger.debug('Running emcee - initial sampling...')
    sampler = emcee.EnsembleSampler(self.nwalkers, npars, model.lnpost)
    pos, prob, _ = sampler.run_mcmc(p0, self.ninit)

    # cull the weak walkers: restart all walkers near the best sample so far
    best_ix = sampler.flatlnprobability.argmax()
    best_p0 = (sampler.flatchain[best_ix][None] +
               np.random.normal(0, 1E-5, size=(self.nwalkers, npars)))

    sampler.reset()
    logger.debug('burn-in...')
    pos, prob, _ = sampler.run_mcmc(best_p0, self.nburn)

    sampler.reset()
    logger.debug('sampling...')
    _ = sampler.run_mcmc(pos, self.niter)

    model._sampler = sampler
    model._make_samples(0.01)

    return id, model
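
# A minimal skeleton of the Worker class that the `work` method above belongs
# to, consistent with how it is used in the first script
# (`pool.map(worker, tasks, callback=worker.callback)`). The `callback` body
# and the constructor defaults are assumptions; the sampler settings mirror
# the isochrone-fitting script at the bottom of this file.
class Worker(object):

    def __init__(self, db_path, samples_path, overwrite=False,
                 nwalkers=128, ninit=256, nburn=1024, niter=4096):
        self.db_path = db_path
        self.samples_path = samples_path
        self.overwrite = overwrite
        self.nwalkers = nwalkers
        self.ninit = ninit
        self.nburn = nburn
        self.niter = niter

    def __call__(self, task):
        obs_id, = task  # tasks are 1-tuples from session.query(Observation.id)
        return self.work(obs_id)

    def callback(self, result):
        # hypothetical: save posterior samples to disk as each task finishes
        id_, model = result
        samples_file = path.join(self.samples_path, '{0}.hdf5'.format(id_))
        if not path.exists(samples_file) or self.overwrite:
            model.samples.to_hdf(samples_file, key='samples')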
def main(db_path, run_root_path, drop_all=False, overwrite=False, **kwargs):
    # Make sure the specified paths actually exist
    db_path = path.abspath(db_path)
    run_root_path = path.abspath(run_root_path)
    for path_ in [path.dirname(db_path), run_root_path]:
        if not path.exists(path_):
            raise ValueError("Path '{0}' doesn't exist!".format(path_))

    # ------------------------------------------------------------------------
    # These are relative paths, so the script needs to be run from the
    # scripts path...

    # ID table for mapping group index to TGAS row
    ID_tbl = Table.read('../data/star_identifier.csv')

    # TGAS table
    logger.debug("Loading TGAS data...")
    tgas = Table.read('../../gaia-comoving-stars/data/stacked_tgas.fits')

    # Catalog of velocities for Bensby's HIP stars:
    bensby = Table.read('../data/bensbyrv_bestunique.csv')

    # ------------------------------------------------------------------------
    # connect to the database
    engine = db_connect(db_path, ensure_db_exists=True)
    # engine.echo = True
    logger.debug("Connected to database at '{}'".format(db_path))

    if drop_all:  # remove all tables and replace
        Base.metadata.drop_all()
        Base.metadata.create_all()

    # create a new session for interacting with the database
    session = Session()

    logger.debug("Loading SpectralLineInfo table")

    line_info = OrderedDict()

    # air wavelength of Halpha -- wavelength calibration from comp lamp is
    # done at air wavelengths, so this is where Halpha should be
    line_info['Halpha'] = 6562.8*u.angstrom

    # [OI] emission lines -- wavelengths from:
    # http://physics.nist.gov/PhysRefData/ASD/lines_form.html
    line_info['[OI] 5577'] = 5577.3387*u.angstrom
    line_info['[OI] 6300'] = 6300.304*u.angstrom
    line_info['[OI] 6364'] = 6363.776*u.angstrom

    for name, wvln in line_info.items():
        n = session.query(SpectralLineInfo)\
                   .filter(SpectralLineInfo.name == name).count()
        if n == 0:
            logger.debug('Loading line {0} at {1}'.format(name, wvln))
            line = SpectralLineInfo(name=name, wavelength=wvln)
            session.add(line)
            session.commit()
        else:
            logger.debug('Line {0} already loaded'.format(name))

    # Create an entry for this observing run
    data_path, run_name = path.split(run_root_path)
    logger.info("Path to night paths: {0}".format(data_path))
    n = session.query(Run).filter(Run.name == run_name).count()
    if n == 0:
        logger.debug('Adding run {0} to database'.format(run_name))
        run = Run(name=run_name)
        session.add(run)
        session.commit()
    elif n == 1:
        logger.debug('Loading run {0} from database'.format(run_name))
        run = session.query(Run).filter(Run.name == run_name).limit(1).one()
    else:
        raise RuntimeError("Multiple runs named '{0}' found in the database!"
                           .format(run_name))

    # Now we need to go through each processed night of data and load all of
    # the relevant observations of sources.

    # First we get the column names for the Observation and TGASSource tables
    obs_columns = [str(c).split('.')[1] for c in Observation.__table__.columns]
    tgassource_columns = [str(c).split('.')[1]
                          for c in TGASSource.__table__.columns]

    # Here's where there's a bit of hard-coded bewitchery - the nights (within
    # each run) have to be labeled 'n1', 'n2', etc. Sorry.
    glob_pattr_proc = path.join(data_path, 'processed', run_name, 'n?')
    for proc_night_path in glob.glob(glob_pattr_proc):
        night = path.basename(proc_night_path)
        night_id = int(night[1])
        logger.debug('Loading night {0}...'.format(night_id))

        observations = []
        tgas_sources = []
        prior_rvs = []

        glob_pattr_1d = path.join(proc_night_path, '1d_*.fit')
        for path_1d in ProgressBar(glob.glob(glob_pattr_1d)):
            hdr = fits.getheader(path_1d)

            # skip all except OBJECT observations
            if hdr['IMAGETYP'] != 'OBJECT':
                continue

            basename = path.basename(path_1d)[3:]
            logger.log(1, 'loading row for {0}'.format(basename))

            kw = dict()

            # construct filenames using the hard-coded naming conventions
            kw['filename_raw'] = basename
            kw['filename_p'] = 'p_' + basename
            kw['filename_1d'] = '1d_' + basename

            # check if this filename is already in the database; if so, drop it
            base_query = session.query(Observation)\
                                .filter(Observation.filename_raw ==
                                        kw['filename_raw'])
            already_loaded = base_query.count() > 0

            if already_loaded and overwrite:
                base_query.delete()
                session.commit()

            elif already_loaded:
                logger.debug('Object {0} [{1}] already loaded'
                             .format(hdr['OBJECT'],
                                     path.basename(kw['filename_raw'])))
                continue

            # read in header of 1d file and store keywords that exist as
            # columns
            kw.update(fits_header_to_cols(hdr, obs_columns))

            # HACK: skip empty object name
            if len(str(hdr['OBJECT'])) == 0:
                logger.warning('SKIPPING - empty OBJECT key')
                continue

            # get group id from object name
            if '-' in str(hdr['OBJECT']):
                # Per APW and SMOH's convention
                split_name = hdr['OBJECT'].split('-')
                kw['group_id'] = int(split_name[0])

                # because: reasons
                if kw['group_id'] == 10:
                    tgas_row_idx = int(split_name[1])
                else:
                    smoh_idx = int(split_name[1])
                    tgas_row_idx = ID_tbl[smoh_idx]['tgas_row']
                tgas_row = tgas[tgas_row_idx]

                # query Simbad to get all possible names for this target
                if tgas_row['hip'] > 0:
                    object_name = 'HIP{0}'.format(tgas_row['hip'])
                else:
                    object_name = 'TYC {0}'.format(tgas_row['tycho2_id'])

                logger.log(1, 'common name: {0}'.format(object_name))
                try:
                    all_ids = Simbad.query_objectids(object_name)['ID']\
                                    .astype(str)
                except Exception as e:
                    logger.warning('Simbad query_objectids failed for "{0}" '
                                   'with error: {1}'
                                   .format(object_name, str(e)))
                    all_ids = []

                logger.log(1, 'this is a group object')
                if len(all_ids) > 0:
                    logger.log(1, 'other names for this object: {0}'
                               .format(', '.join(all_ids)))
                else:
                    logger.log(1, 'simbad names for this object could not '
                               'be retrieved')

            elif (isinstance(hdr['OBJECT'], int) or
                    str(hdr['OBJECT']).startswith('k') or
                    hdr['OBJECT'][0].isdigit()):
                # Assume it's a KIC number - per Ruth and Dan's convention
                if isinstance(hdr['OBJECT'], int):
                    object_name = 'KIC {0:d}'.format(hdr['OBJECT'])
                elif hdr['OBJECT'].startswith('k'):
                    object_name = 'KIC {0}'.format(hdr['OBJECT'][1:])
                else:
                    object_name = 'KIC {0}'.format(hdr['OBJECT'])

                # query Simbad to get all possible names for this target
                logger.log(1, 'common name: {0}'.format(object_name))
                try:
                    all_ids = Simbad.query_objectids(object_name)['ID']\
                                    .astype(str)
                except Exception as e:
                    logger.warning('Simbad query_objectids failed for "{0}" '
                                   'with error: {1}'
                                   .format(object_name, str(e)))
                    all_ids = []

                logger.log(1, 'this is a KIC object')
                if len(all_ids) > 0:
                    logger.log(1, 'other names for this object: {0}'
                               .format(', '.join(all_ids)))
                else:
                    logger.log(1, 'simbad names for this object could not '
                               'be retrieved')

                # get the HIP or Tycho 2 ID, if it has one
                hip_id = [id_ for id_ in all_ids if 'HIP' in id_]
                tyc_id = [id_ for id_ in all_ids if 'TYC' in id_]
                if hip_id:
                    hip_id = int(hip_id[0].replace('HIP', '').strip())
                    logger.log(1, 'source has HIP id: {0}'.format(hip_id))
                    tgas_row_idx = np.where(tgas['hip'] == hip_id)[0]
                    if len(tgas_row_idx) == 0:
                        tgas_row_idx = None
                    else:
                        tgas_row = tgas[tgas_row_idx]

                elif tyc_id:
                    tyc_id = tyc_id[0].replace('TYC', '').strip()
                    logger.log(1, 'source has tycho 2 id: {0}'.format(tyc_id))
                    tgas_row_idx = np.where(tgas['tycho2_id'] == tyc_id)[0]
                    if len(tgas_row_idx) == 0:
                        tgas_row_idx = None
                    else:
                        tgas_row = tgas[tgas_row_idx]

                else:
                    logger.log(1, 'source has no HIP or TYC id.')
                    tgas_row_idx = None

                # result_table = Simbad.query_object(object_name)

            else:
                object_name = hdr['OBJECT']
                logger.log(1, 'common name: {0}'.format(object_name))
                logger.log(1, 'this is not a group object')

                # query Simbad to get all possible names for this target
                try:
                    all_ids = Simbad.query_objectids(object_name)['ID']\
                                    .astype(str)
                except Exception as e:
                    logger.warning('SKIPPING: Simbad query_objectids failed '
                                   'for "{0}" with error: {1}'
                                   .format(object_name, str(e)))
                    continue

                # get the HIP or Tycho 2 ID, if it has one
                hip_id = [id_ for id_ in all_ids if 'HIP' in id_]
                tyc_id = [id_ for id_ in all_ids if 'TYC' in id_]
                if hip_id:
                    hip_id = int(hip_id[0].replace('HIP', '').strip())
                    logger.log(1, 'source has HIP id: {0}'.format(hip_id))
                    tgas_row_idx = np.where(tgas['hip'] == hip_id)[0]
                    if len(tgas_row_idx) == 0:
                        tgas_row_idx = None
                    else:
                        tgas_row = tgas[tgas_row_idx]

                elif tyc_id:
                    tyc_id = tyc_id[0].replace('TYC', '').strip()
                    logger.log(1, 'source has tycho 2 id: {0}'.format(tyc_id))
                    tgas_row_idx = np.where(tgas['tycho2_id'] == tyc_id)[0]
                    if len(tgas_row_idx) == 0:
                        tgas_row_idx = None
                    else:
                        tgas_row = tgas[tgas_row_idx]

                else:
                    logger.log(1, 'source has no tycho 2 id.')
                    tgas_row_idx = None

            # store relevant names / IDs
            simbad_info_kw = dict()
            for id_ in all_ids:
                if id_.lower().startswith('hd'):
                    simbad_info_kw['hd_id'] = id_[2:]
                elif id_.lower().startswith('hip'):
                    simbad_info_kw['hip_id'] = id_[3:]
                elif id_.lower().startswith('tyc'):
                    simbad_info_kw['tyc_id'] = id_[3:]
                elif id_.lower().startswith('2mass'):
                    simbad_info_kw['twomass_id'] = id_[5:]

            for k, v in simbad_info_kw.items():
                simbad_info_kw[k] = v.strip()

            simbad_info = SimbadInfo(**simbad_info_kw)

            # Compute barycenter velocity given coordinates of where the
            # telescope was pointing and observation time
            t = Time(hdr['JD'], format='jd', scale='utc')
            sc = coord.SkyCoord(ra=hdr['RA'], dec=hdr['DEC'],
                                unit=(u.hourangle, u.degree))
            kw['v_bary'] = bary_vel_corr(t, sc, location=kitt_peak)

            obs = Observation(night=night_id, **kw)
            obs.run = run

            # Get the TGAS data if the source is in TGAS
            if tgas_row_idx is not None:
                logger.log(1, 'TGAS row: {0}'.format(tgas_row_idx))
                tgas_kw = dict()
                tgas_kw['row_index'] = tgas_row_idx
                for name in tgas.colnames:
                    if name in tgassource_columns:
                        tgas_kw[name] = tgas_row[name]

                # `gaia_query` is a module-level ADQL template
                job = Gaia.launch_job(gaia_query
                                      .format(tgas_kw['source_id'][0]),
                                      dump_to_file=False)
                res = job.get_results()

                if len(res) == 0:
                    logger.warning("No 2MASS data found for: {0}"
                                   .format(tgas_kw['source_id']))
                elif len(res) == 1:
                    tgas_kw['J'] = res['j_m'][0]
                    tgas_kw['J_err'] = res['j_msigcom'][0]
                    tgas_kw['H'] = res['h_m'][0]
                    tgas_kw['H_err'] = res['h_msigcom'][0]
                    tgas_kw['Ks'] = res['ks_m'][0]
                    tgas_kw['Ks_err'] = res['ks_msigcom'][0]

                tgas_source = TGASSource(**tgas_kw)
                tgas_sources.append(tgas_source)

                obs.tgas_source = tgas_source

            else:
                logger.log(1, 'TGAS row could not be found.')

            obs.simbad_info = simbad_info
            observations.append(obs)

            # retrieve a previous measurement from the literature
            result = get_best_rv(obs)
            if result is not None:
                rv, rv_err, rv_qual, rv_bibcode, rv_source = result
                prv = PriorRV(rv=rv*u.km/u.s, err=rv_err*u.km/u.s,
                              qual=rv_qual, bibcode=rv_bibcode,
                              source=rv_source)
                obs.prior_rv = prv
                prior_rvs.append(prv)

            logger.log(1, '-'*68)

        session.add_all(observations)
        session.add_all(tgas_sources)
        session.add_all(prior_rvs)
        session.commit()

    # Last thing to do is cross-match with the Bensby catalog to replace
    # velocities when they are better
    for sim_info in session.query(SimbadInfo)\
                           .filter(SimbadInfo.hip_id != None).all():
        hip_id = 'HIP' + str(sim_info.hip_id)
        row = bensby[bensby['OBJECT'] == hip_id]
        if len(row) > 0:
            sim_info.rv = row['velValue']
            sim_info.rv_qual = row['quality']
            sim_info.rv_bibcode = row['bibcode']
            session.flush()

    session.close()
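
# `fits_header_to_cols` is defined elsewhere in this module. A plausible
# minimal version, assuming it simply keeps header keywords whose normalized
# names match the table's column names:
def fits_header_to_cols(hdr, columns):
    kw = dict()
    for k, v in hdr.items():
        k = k.lower().replace('-', '_')  # e.g. 'DATE-OBS' -> 'date_obs'
        if k in columns:
            kw[k] = v
    return kw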
def main(db_path, run_name, data_root_path=None, filename=None,
         overwrite=False, pool=None):
    if pool is None:
        pool = schwimmbad.SerialPool()

    # connect to the database
    engine = db_connect(db_path)
    # engine.echo = True
    logger.debug("Connected to database at '{}'".format(db_path))

    # create a new session for interacting with the database
    session = Session()

    root_path, _ = path.split(db_path)
    if data_root_path is None:
        data_root_path = root_path

    plot_path = path.join(root_path, 'plots', run_name)
    if not path.exists(plot_path):
        os.makedirs(plot_path, exist_ok=True)

    # TODO: there might be some bugs here...
    n_lines = session.query(SpectralLineInfo).count()
    Halpha = session.query(SpectralLineInfo)\
                    .filter(SpectralLineInfo.name == 'Halpha').one()
    OI_lines = session.query(SpectralLineInfo)\
                      .filter(SpectralLineInfo.name.contains('[OI]')).all()

    if filename is None:  # grab all unfinished sources
        observations = session.query(Observation).join(Run)\
                              .filter(Run.name == run_name).all()
    else:  # only process the observation corresponding to this filename
        observations = session.query(Observation).join(Run)\
                              .filter(Run.name == run_name)\
                              .filter(Observation.filename_raw ==
                                      filename).all()

    for obs in observations:
        measurements = session.query(SpectralLineMeasurement)\
                              .join(Observation)\
                              .filter(Observation.id == obs.id).all()

        if len(measurements) == n_lines and not overwrite:
            logger.debug('All line measurements already complete for object '
                         '{0} in file {1}'.format(obs.object,
                                                  obs.filename_raw))
            continue

        # Read the spectrum data and get wavelength solution
        filebase, _ = path.splitext(obs.filename_1d)
        filename_1d = obs.path_1d(data_root_path)
        spec = Table.read(filename_1d)
        logger.debug('Loaded 1D spectrum for object {0} from file {1}'
                     .format(obs.object, filename_1d))

        # Extract region around Halpha
        x, (flux, ivar) = extract_region(spec['wavelength'],
                                         center=Halpha.wavelength.value,
                                         width=100,
                                         arrs=[spec['source_flux'],
                                               spec['source_ivar']])

        # We start by doing maximum likelihood estimation to fit the line,
        # then use the best-fit parameters to initialize an MCMC run.
        # TODO: need to figure out if it's emission or absorption... for now
        # just assume absorption
        absorp_emiss = -1.
        lf = VoigtLineFitter(x, flux, ivar, absorp_emiss=absorp_emiss)
        lf.fit()
        fit_pars = lf.get_gp_mean_pars()

        if (not lf.success or
                abs(fit_pars['x0'] - Halpha.wavelength.value) > 16. or  # 16 Å = ~700 km/s
                abs(fit_pars['amp']) < 10):  # minimum amplitude - MAGIC NUMBER
            # TODO: should try again with an emission-line model
            logger.error('absorption line has tiny amplitude! did '
                         'auto-determination of absorption/emission fail?')
            # TODO: what now?
            continue

        fig = lf.plot_fit()
        fig.savefig(path.join(plot_path, '{}_maxlike.png'.format(filebase)),
                    dpi=256)
        plt.close(fig)

        # --------------------------------------------------------------------
        # Run `emcee` instead to sample over GP model parameters:
        if fit_pars['std_G'] < 1E-2:
            lf.gp.freeze_parameter('mean:ln_std_G')

        initial = np.array(lf.gp.get_parameter_vector())
        if initial[4] < -10:  # TODO: ???
            initial[4] = -8.
        if initial[5] < -10:  # TODO: ???
            initial[5] = -8.
        ndim, nwalkers = len(initial), 64

        sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability,
                                        pool=pool, args=(lf.gp, flux))

        logger.debug("Running burn-in...")
        p0 = initial + 1e-6 * np.random.randn(nwalkers, ndim)
        p0, lp, _ = sampler.run_mcmc(p0, 128)

        logger.debug("Running 2nd burn-in...")
        sampler.reset()
        p0 = p0[lp.argmax()] + 1e-3 * np.random.randn(nwalkers, ndim)
        p0, lp, _ = sampler.run_mcmc(p0, 512)

        logger.debug("Running production...")
        sampler.reset()
        pos, lp, _ = sampler.run_mcmc(p0, 1024)

        fit_kw = dict()
        for i, par_name in enumerate(lf.gp.get_parameter_names()):
            if 'kernel' in par_name:
                continue

            # remove the 'mean:' prefix
            par_name = par_name[5:]

            # skip background polynomial parameters
            if par_name.startswith('bg'):
                continue

            samples = sampler.flatchain[:, i]

            if par_name.startswith('ln_'):
                par_name = par_name[3:]
                samples = np.exp(samples)

            MAD = np.median(np.abs(samples - np.median(samples)))
            fit_kw[par_name] = np.median(samples)
            # ~1.48 * MAD approximates the stddev for a Gaussian
            fit_kw[par_name + '_error'] = 1.5 * MAD

        # remove all previous line measurements
        q = session.query(SpectralLineMeasurement).join(Observation)\
                   .filter(Observation.id == obs.id)
        if q.count() > 0:
            for meas in q.all():
                session.delete(meas)
            session.commit()

        slm = SpectralLineMeasurement(**fit_kw)
        slm.info = Halpha
        slm.observation = obs
        session.add(slm)
        session.commit()

        # --------------------------------------------------------------------
        # plot MCMC traces
        fig, axes = plt.subplots(2, 4, figsize=(18, 6))
        for i in range(sampler.dim):
            for walker in sampler.chain[..., i]:
                axes.flat[i].plot(walker, marker='',
                                  drawstyle='steps-mid', alpha=0.2)
            axes.flat[i].set_title(lf.gp.get_parameter_names()[i],
                                   fontsize=12)
        fig.tight_layout()
        fig.savefig(path.join(plot_path,
                              '{}_mcmc_trace.png'.format(filebase)),
                    dpi=256)
        plt.close(fig)
        # --------------------------------------------------------------------

        # --------------------------------------------------------------------
        # plot samples
        fig, axes = plt.subplots(3, 1, figsize=(10, 10), sharex=True)

        samples = sampler.flatchain
        for s in samples[np.random.randint(len(samples), size=32)]:
            lf.gp.set_parameter_vector(s)
            lf.plot_fit(axes=axes, fit_alpha=0.2)

        fig.tight_layout()
        fig.savefig(path.join(plot_path,
                              '{}_mcmc_fits.png'.format(filebase)),
                    dpi=256)
        plt.close(fig)
        # --------------------------------------------------------------------

        # --------------------------------------------------------------------
        # corner plot
        fig = corner.corner(sampler.flatchain[::10, :],
                            labels=[x.split(':')[1]
                                    for x in lf.gp.get_parameter_names()])
        fig.savefig(path.join(plot_path, '{}_corner.png'.format(filebase)),
                    dpi=256)
        plt.close(fig)
        # --------------------------------------------------------------------

        # compute centroids for sky lines
        sky_centroids = []
        for j, sky_line in enumerate(OI_lines):
            wvln = sky_line.wavelength.value
            x, (flux, ivar) = extract_region(spec['wavelength'],
                                             center=wvln,
                                             width=32.,  # angstroms
                                             arrs=[spec['background_flux'],
                                                   spec['background_ivar']])

            lf = GaussianLineFitter(x, flux, ivar,
                                    absorp_emiss=1.)  # all emission lines

            try:
                lf.fit()
                fit_pars = lf.get_gp_mean_pars()
            except Exception as e:
                logger.warn("Failed to fit sky line {0}:\n{1}"
                            .format(sky_line, e))
                lf.success = False
                fit_pars = lf.get_init()
                # HACK: fill in dummy values so the rest of the loop can run
                fit_pars['amp'] = 0.
                fit_pars['bg_coef'] = None
                fit_pars['x0'] = 0.
            # HACK: hackish signal-to-noise estimate
            max_ = fit_pars['amp'] / np.sqrt(2 * np.pi * fit_pars['std']**2)
            SNR = max_ / np.median(1 / np.sqrt(ivar))

            if (not lf.success or abs(fit_pars['x0'] - wvln) > 4 or
                    fit_pars['amp'] < 10 or fit_pars['std'] > 4 or
                    SNR < 2.5):
                # failed fit
                x0 = np.nan * u.angstrom
                title = 'failed'
                fit_pars['amp'] = 0.
            else:
                x0 = fit_pars['x0'] * u.angstrom
                title = '{:.2f}'.format(fit_pars['amp'])

            if lf.success:
                fig = lf.plot_fit()
                fig.suptitle(title, y=0.95)
                fig.subplots_adjust(top=0.8)
                fig.savefig(path.join(plot_path,
                                      '{}_maxlike_sky_{:.0f}.png'
                                      .format(filebase, wvln)),
                            dpi=256)
                plt.close(fig)

            # store the sky line measurements
            fit_pars['std_G'] = fit_pars.pop('std')  # HACK: rename to match schema
            fit_pars.pop('bg_coef')  # HACK: table has no background column
            slm = SpectralLineMeasurement(**fit_pars)
            slm.info = sky_line
            slm.observation = obs
            session.add(slm)
            session.commit()

            sky_centroids.append(x0)
        sky_centroids = u.Quantity(sky_centroids)

        logger.info('{} [{}]: x0={x0:.3f} σ={err:.3f}\n--------'
                    .format(obs.object, filebase,
                            x0=fit_kw['x0'], err=fit_kw['x0_error']))

        session.commit()

    pool.close()
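
# `extract_region` is not shown in this file. A minimal sketch matching its
# call signature above (assuming `width` is the full window width in the same
# units as the wavelength array):
def extract_region(wvln, center, width, arrs):
    wvln = np.asarray(wvln)
    mask = np.abs(wvln - center) < width / 2.
    return wvln[mask], tuple(np.asarray(arr)[mask] for arr in arrs)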
def main(db_path, run_name, overwrite=False, pool=None):
    if pool is None:
        pool = schwimmbad.SerialPool()

    # connect to the database
    engine = db_connect(db_path)
    # engine.echo = True
    logger.debug("Connected to database at '{}'".format(db_path))

    # create a new session for interacting with the database
    session = Session()

    root_path, _ = path.split(db_path)
    plot_path = path.join(root_path, 'plots', run_name)
    if not path.exists(plot_path):
        os.makedirs(plot_path, exist_ok=True)

    # get object to correct the observed RV's
    rv_corr = RVCorrector(session, run_name)

    observations = session.query(Observation).join(Run)\
                          .filter(Run.name == run_name).all()

    for obs in observations:
        q = session.query(RVMeasurement).join(Observation)\
                   .filter(Observation.id == obs.id)

        if q.count() > 0 and not overwrite:
            logger.debug('RV measurement already complete for object '
                         '{0} in file {1}'.format(obs.object,
                                                  obs.filename_raw))
            continue

        elif q.count() > 1:
            raise RuntimeError('Multiple RV measurements found for object '
                               '{0}'.format(obs))

        elif len(obs.measurements) == 0:
            logger.debug('Observation {0} has no line measurements.'
                         .format(obs))
            continue

        corrected_rv, err, flag = rv_corr.get_corrected_rv(obs)

        # remove any previous RV measurements
        if q.count() > 0:
            session.delete(q.one())
            session.commit()

        rv_meas = RVMeasurement(rv=corrected_rv, err=err, flag=flag)
        rv_meas.observation = obs
        session.add(rv_meas)
        session.commit()

    pool.close()
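
# `RVCorrector` is defined elsewhere. The core of the correction is standard,
# though: convert the measured Halpha centroid to a velocity and add the
# barycentric velocity stored on the observation. A sketch under those
# assumptions (not the project's implementation; sky-line zero-point
# corrections are omitted):
def centroid_to_rv(x0, v_bary, lambda0=6562.8*u.angstrom):
    from astropy.constants import c
    # non-relativistic Doppler shift: v = c * (lam_obs - lam_rest) / lam_rest
    return (c * (x0 - lambda0) / lambda0).to(u.km/u.s) + v_bary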
def main():
    # TODO: bad, hard-coded...
    # base_path = '/Volumes/ProjectData/gaia-comoving-followup/'
    base_path = '../../data/'
    db_path = path.join(base_path, 'db.sqlite')
    engine = db_connect(db_path)
    session = Session()

    chain_path = path.abspath('./isochrone_chains')
    os.makedirs(chain_path, exist_ok=True)

    # Check out the bottom of "Color-magnitude diagram.ipynb":
    interesting_group_ids = [1500, 1229, 1515]

    all_photometry = OrderedDict([
        ('1500-8455', OrderedDict([('J', (6.8379998, 0.021)),
                                   ('H', (6.4640002, 0.017000001)),
                                   ('K', (6.3369999, 0.017999999)),
                                   ('W1', (6.2950001, 0.093000002)),
                                   ('W2', (6.2490001, 0.026000001)),
                                   ('W3', (6.3330002, 0.015)),
                                   ('B', (9.5950003, 0.022)),
                                   ('V', (8.5120001, 0.014))])),
        ('1500-1804', OrderedDict([('J', (6.9039998, 0.041000001)),
                                   ('H', (6.8559999, 0.027000001)),
                                   ('K', (6.7989998, 0.017000001)),
                                   ('W1', (6.803, 0.064999998)),
                                   ('W2', (6.7600002, 0.018999999)),
                                   ('W3', (6.8270001, 0.016000001)),
                                   ('B', (7.4980001, 0.015)),
                                   ('V', (7.289, 0.011))])),
        ('1229-1366', OrderedDict([('J', (6.7290001, 0.024)),
                                   ('H', (6.2449999, 0.02)),
                                   ('K', (6.1529999, 0.023)),
                                   ('W1', (6.1799998, 0.096000001)),
                                   ('W2', (6.04, 0.035)),
                                   ('W3', (6.132, 0.016000001)),
                                   ('B', (9.5539999, 0.021)),
                                   ('V', (8.4619999, 0.014))])),
        ('1229-7470', OrderedDict([('J', (9.1709995, 0.024)),
                                   ('H', (8.7959995, 0.026000001)),
                                   ('K', (8.7299995, 0.022)),
                                   ('W1', (8.6669998, 0.023)),
                                   ('W2', (8.7189999, 0.02)),
                                   ('W3', (8.6680002, 0.025)),
                                   ('B', (11.428, 0.054000001)),
                                   ('V', (10.614, 0.039999999))])),
        ('1515-3584', OrderedDict([('J', (5.363999843597412, 0.024000000208616257)),
                                   ('H', (4.965000152587891, 0.035999998450279236)),
                                   ('K', (4.815999984741211, 0.032999999821186066)),
                                   ('W1', (4.758, 0.215)),
                                   ('W2', (4.565, 0.115)),
                                   ('W3', (4.771, 0.015)),
                                   ('B', (8.347999572753906, 0.01600000075995922)),
                                   ('V', (7.182000160217285, 0.009999999776482582))])),
        ('1515-1834', OrderedDict([('J', (8.855999946594238, 0.024000000208616257)),
                                   ('H', (8.29699993133545, 0.020999999716877937)),
                                   ('K', (8.178999900817871, 0.017999999225139618)),
                                   ('W1', (8.117, 0.022)),
                                   ('W2', (8.15, 0.019)),
                                   ('W3', (8.065, 0.02)),
                                   ('B', (12.309000015258789, 0.11999999731779099)),
                                   ('V', (11.069999694824219, 0.054999999701976776))]))
    ])

    for k in all_photometry:
        samples_file = path.join(chain_path, '{0}.hdf5'.format(k))

        if path.exists(samples_file):
            logger.info("skipping {0} - samples exist at {1}"
                        .format(k, samples_file))
            continue

        phot = all_photometry[k]
        obs = session.query(Observation).filter(Observation.object == k).one()
        plx = (obs.tgas_source.parallax, obs.tgas_source.parallax_error)

        # fit an isochrone (`iso` is a module-level isochrone grid)
        model = StarModel(iso, use_emcee=True, parallax=plx, **phot)
        model.set_bounds(mass=(0.01, 20), feh=(-1, 1),
                         distance=(0, 300), AV=(0, 1))

        # initial conditions for emcee walkers
        nwalkers = 128

        p0 = []
        m0, age0, feh0 = model.ic.random_points(nwalkers,
                                                minmass=0.01, maxmass=10.,
                                                minfeh=-1, maxfeh=1)
        _, max_distance = model.bounds('distance')
        _, max_AV = model.bounds('AV')
        d0 = 10**(np.random.uniform(0, np.log10(max_distance),
                                    size=nwalkers))
        AV0 = np.random.uniform(0, max_AV, size=nwalkers)

        p0 += [m0]
        p0 += [age0, feh0, d0, AV0]
        p0 = np.array(p0).T
        npars = p0.shape[1]

        # run emcee
        ninit = 256
        nburn = 1024
        niter = 4096

        logger.debug('Running emcee - initial sampling...')
        sampler = emcee.EnsembleSampler(nwalkers, npars, model.lnpost)
        # pos, prob, state = sampler.run_mcmc(p0, ninit)
        for pos, prob, state in tqdm(sampler.sample(p0, iterations=ninit),
                                     total=ninit):
            pass

        # cull the weak walkers: restart all walkers near the best sample
        best_ix = sampler.flatlnprobability.argmax()
        best_p0 = (sampler.flatchain[best_ix][None] +
                   np.random.normal(0, 1E-5, size=(nwalkers, npars)))

        sampler.reset()
        logger.debug('burn-in...')
        for pos, prob, state in tqdm(sampler.sample(best_p0,
                                                    iterations=nburn),
                                     total=nburn):
            pass
        # pos,_,_ = sampler.run_mcmc(best_p0, nburn)

        sampler.reset()
        logger.debug('sampling...')
        # _ = sampler.run_mcmc(pos, niter)
        for pos, prob, state in tqdm(sampler.sample(pos, iterations=niter),
                                     total=niter):
            pass

        model._sampler = sampler
        model._make_samples(0.08)

        model.samples.to_hdf(samples_file, key='samples')
        # np.save('isochrone_chains/chain.npy', sampler.chain)
        logger.debug('...done and saved!')
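
# The saved chains can be read back with pandas, since `model.samples` is a
# DataFrame written with `to_hdf(..., key='samples')` above. For example:
#
#     import pandas as pd
#     samples = pd.read_hdf('isochrone_chains/1500-8455.hdf5', key='samples')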