Example #1
def assign_likelihood_ratios_xml(xmldoc, coinc_def_id, offset_vectors, vetoseglists, events_func, veto_func, ln_likelihood_ratio_func, likelihood_params_func, verbose = False, params_func_extra_args = ()):
	"""
	Assigns likelihood ratio values to coincidences (XML version).
	"""
	#
	# Iterate over all coincs, assigning likelihood ratios.
	#

	coinc_event_table = lsctables.CoincTable.get_table(xmldoc)

	if verbose:
		progressbar = ProgressBar("computing likelihood ratios", max = len(coinc_event_table))
	else:
		progressbar = None

	for coinc_event in coinc_event_table:
		if progressbar is not None:
			progressbar.increment()
		if coinc_event.coinc_def_id != coinc_def_id:
			continue
		params = likelihood_params_func([event for event in events_func(None, coinc_event.coinc_event_id) if veto_func(event, vetoseglists)], offset_vectors[coinc_event.time_slide_id], *params_func_extra_args)
		coinc_event.likelihood = ln_likelihood_ratio_func(params) if params is not None else None

	del progressbar

	#
	# Done
	#

	return
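
A usage sketch (not from the source): the function is driven entirely by its callbacks, so hypothetical stubs are enough to show the calling convention. The four lambdas below are placeholders for a real analysis' event lookup, veto test, parameter extraction, and ranking statistic.

from glue.ligolw import lsctables
from glue.ligolw import utils as ligolw_utils

xmldoc = ligolw_utils.load_filename("coincs.xml.gz", verbose=True)
coinc_def_id = lsctables.CoincDefTable.get_table(xmldoc)[0].coinc_def_id
offset_vectors = lsctables.TimeSlideTable.get_table(xmldoc).as_dict()

# hypothetical stand-in callbacks, for illustration only
events_func = lambda connection, coinc_event_id: []    # fetch a coinc's events
veto_func = lambda event, vetoseglists: True           # keep every event
likelihood_params_func = lambda events, offsets: None  # extract ranking-stat params
ln_likelihood_ratio_func = lambda params: 0.           # evaluate ln L

assign_likelihood_ratios_xml(
    xmldoc, coinc_def_id, offset_vectors, vetoseglists={},
    events_func=events_func, veto_func=veto_func,
    ln_likelihood_ratio_func=ln_likelihood_ratio_func,
    likelihood_params_func=likelihood_params_func, verbose=True)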
Example #2
def time_slides_vacuum(time_slides, verbose=False):
    """
	Given a dictionary mapping time slide IDs to instrument-->offset
	mappings, for example as returned by the as_dict() method of the
	TimeSlideTable class in glue.ligolw.lsctables or by the
	load_time_slides() function in this module, construct and return a
	mapping indicating time slide equivalences.  This can be used to
	delete redundant time slides from a time slide table, and then also
	used via the applyKeyMapping() method of glue.ligolw.table.Table
	instances to update cross references (for example in the
	coinc_event table).

	Example:

	>>> slides = {"time_slide_id:0": {"H1": 0, "H2": 0},
	... "time_slide_id:1": {"H1": 10, "H2": 10}, "time_slide_id:2": {"H1":
	... 0, "H2": 10}}
	>>> time_slides_vacuum(slides)
	{'time_slide_id:1': 'time_slide_id:0'}

	indicating that time_slide_id:1 describes a time slide that is
	equivalent to time_slide_id:0.  The calling code could use this
	information to delete time_slide_id:1 from the time_slide table,
	and replace references to that ID in other tables with references
	to time_slide_id:0.
	"""
    # convert offsets to deltas
    time_slides = dict((time_slide_id, offsetvect.deltas)
                       for time_slide_id, offsetvect in time_slides.items())
    if verbose:
        progressbar = ProgressBar(max=len(time_slides))
    else:
        progressbar = None
    # old --> new mapping
    mapping = {}
    # while there are time slide offset dictionaries remaining
    while time_slides:
        # pick an ID/offset dictionary pair at random
        id1, deltas1 = time_slides.popitem()
        # for every other ID/offset dictionary pair in the time
        # slides
        ids_to_delete = []
        for id2, deltas2 in time_slides.items():
            # if the relative offset dictionaries are
            # equivalent record in the old --> new mapping
            if deltas2 == deltas1:
                mapping[id2] = id1
                ids_to_delete.append(id2)
        for id2 in ids_to_delete:
            time_slides.pop(id2)
        if progressbar is not None:
            progressbar.update(progressbar.max - len(time_slides))
    # done
    del progressbar
    return mapping
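
A sketch of the "calling code" the docstring describes, assuming xmldoc holds the loaded document; applyKeyMapping() is the glue.ligolw.table.Table method named above.

time_slide_table = lsctables.TimeSlideTable.get_table(xmldoc)
mapping = time_slides_vacuum(time_slide_table.as_dict(), verbose=True)

# drop the now-redundant time_slide rows ...
time_slide_table[:] = [row for row in time_slide_table
                       if row.time_slide_id not in mapping]

# ... and retarget references to them, e.g. in the coinc_event table
lsctables.CoincTable.get_table(xmldoc).applyKeyMapping(mapping)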
Example #3
def time_slides_vacuum(time_slides, verbose = False):
	"""
	Given a dictionary mapping time slide IDs to instrument-->offset
	mappings, for example as returned by the as_dict() method of the
	TimeSlideTable class in glue.ligolw.lsctables or by the
	load_time_slides() function in this module, construct and return a
	mapping indicating time slide equivalences.  This can be used to
	delete redundant time slides from a time slide table, and then also
	used via the applyKeyMapping() method of glue.ligolw.table.Table
	instances to update cross references (for example in the
	coinc_event table).

	Example:

	>>> slides = {"time_slide_id:0": {"H1": 0, "H2": 0},
	... "time_slide_id:1": {"H1": 10, "H2": 10}, "time_slide_id:2": {"H1":
	... 0, "H2": 10}}
	>>> time_slides_vacuum(slides)
	{'time_slide_id:1': 'time_slide_id:0'}

	indicating that time_slide_id:1 describes a time slide that is
	equivalent to time_slide_id:0.  The calling code could use this
	information to delete time_slide_id:1 from the time_slide table,
	and replace references to that ID in other tables with references
	to time_slide_id:0.
	"""
	# convert offsets to deltas
	time_slides = dict((time_slide_id, offsetvect.deltas) for time_slide_id, offsetvect in time_slides.items())
	if verbose:
		progressbar = ProgressBar(max = len(time_slides))
	else:
		progressbar = None
	# old --> new mapping
	mapping = {}
	# while there are time slide offset dictionaries remaining
	while time_slides:
		# pick an ID/offset dictionary pair at random
		id1, deltas1 = time_slides.popitem()
		# for every other ID/offset dictionary pair in the time
		# slides
		ids_to_delete = []
		for id2, deltas2 in time_slides.items():
			# if the relative offset dictionaries are
			# equivalent record in the old --> new mapping
			if deltas2 == deltas1:
				mapping[id2] = id1
				ids_to_delete.append(id2)
		for id2 in ids_to_delete:
			time_slides.pop(id2)
		if progressbar is not None:
			progressbar.update(progressbar.max - len(time_slides))
	# done
	del progressbar
	return mapping
Example #4
def ligolw_inspinjfind(xmldoc, process, search, snglcomparefunc, nearcoinccomparefunc, end_time_bisect_window = 1.0, verbose = False):
	#
	# Analyze the document's contents.
	#

	if verbose:
		print("indexing ...", file=sys.stderr)

	bbdef = {"inspiral": thinca.InspiralCoincDef}[search]
	sbdef = {"inspiral": InspiralSICoincDef}[search]
	scedef = {"inspiral": InspiralSCExactCoincDef}[search]
	scndef = {"inspiral": InspiralSCNearCoincDef}[search]

	contents = DocContents(xmldoc = xmldoc, bbdef = bbdef, sbdef = sbdef, scedef = scedef, scndef = scndef, process = process, end_time_bisect_window = end_time_bisect_window)

	#
	# Find sim_inspiral <--> sngl_inspiral coincidences.
	#

	progressbar = ProgressBar(max = len(contents.siminspiraltable), textwidth = 35, text = sbdef.description) if verbose else None
	for sim in contents.siminspiraltable:
		if progressbar is not None:
			progressbar.increment()
		inspirals = find_sngl_inspiral_matches(contents, sim, snglcomparefunc)
		if inspirals:
			add_sim_inspiral_coinc(contents, sim, inspirals)
	del progressbar

	#
	# Find sim_inspiral <--> coinc_event coincidences.
	#

	if contents.scn_coinc_def_id:
		progressbar = ProgressBar(max = len(contents.siminspiraltable), textwidth = 35, text = scndef.description) if verbose else None
		for sim in contents.siminspiraltable:
			if progressbar is not None:
				progressbar.increment()
			coincs = contents.coincs_near_endtime(sim.time_geocent)
			exact_coinc_event_ids = find_exact_coinc_matches(coincs, sim, snglcomparefunc)
			near_coinc_event_ids = find_near_coinc_matches(coincs, sim, nearcoinccomparefunc)
			assert exact_coinc_event_ids.issubset(near_coinc_event_ids)
			if exact_coinc_event_ids:
				add_sim_coinc_coinc(contents, sim, exact_coinc_event_ids, contents.sce_coinc_def_id)
			if near_coinc_event_ids:
				add_sim_coinc_coinc(contents, sim, near_coinc_event_ids, contents.scn_coinc_def_id)
		del progressbar

	#
	# Restore the original event order.
	#

	if verbose:
		print("finishing ...", file=sys.stderr)
	contents.sort_triggers_by_id()

	#
	# Done.
	#

	return xmldoc
Example #5
def is_healthy(self, verbose=False):
    # do we believe the PDFs are sufficiently well-defined to
    # compute FAPs and FARs?
    health = min(self.noise_lr_lnpdf.array.sum() / 1000000.,
                 self.zero_lag_lr_lnpdf.array.sum() / 10000.)
    if verbose:
        ProgressBar(text="ranking stat. health", value=health).show()
    return health >= 1.
Example #6
def ligolw_inspinjfind(xmldoc, process, search, snglcomparefunc, nearcoinccomparefunc, end_time_bisect_window = 1.0, verbose = False):
	#
	# Analyze the document's contents.
	#

	if verbose:
		print("indexing ...", file=sys.stderr)

	bbdef = {"inspiral": ligolw_thinca.InspiralCoincDef}[search]
	sbdef = {"inspiral": InspiralSICoincDef}[search]
	scedef = {"inspiral": InspiralSCExactCoincDef}[search]
	scndef = {"inspiral": InspiralSCNearCoincDef}[search]

	contents = DocContents(xmldoc = xmldoc, bbdef = bbdef, sbdef = sbdef, scedef = scedef, scndef = scndef, process = process, end_time_bisect_window = end_time_bisect_window)

	#
	# Find sim_inspiral <--> sngl_inspiral coincidences.
	#

	progressbar = ProgressBar(max = len(contents.siminspiraltable), textwidth = 35, text = sbdef.description) if verbose else None
	for sim in contents.siminspiraltable:
		if progressbar is not None:
			progressbar.increment()
		inspirals = find_sngl_inspiral_matches(contents, sim, snglcomparefunc)
		if inspirals:
			add_sim_inspiral_coinc(contents, sim, inspirals)
	del progressbar

	#
	# Find sim_inspiral <--> coinc_event coincidences.
	#

	if contents.scn_coinc_def_id:
		progressbar = ProgressBar(max = len(contents.siminspiraltable), textwidth = 35, text = scndef.description) if verbose else None
		for sim in contents.siminspiraltable:
			if progressbar is not None:
				progressbar.increment()
			coincs = contents.coincs_near_endtime(sim.get_end())
			exact_coinc_event_ids = find_exact_coinc_matches(coincs, sim, snglcomparefunc)
			near_coinc_event_ids = find_near_coinc_matches(coincs, sim, nearcoinccomparefunc)
			assert exact_coinc_event_ids.issubset(near_coinc_event_ids)
			if exact_coinc_event_ids:
				add_sim_coinc_coinc(contents, sim, exact_coinc_event_ids, contents.sce_coinc_def_id)
			if near_coinc_event_ids:
				add_sim_coinc_coinc(contents, sim, near_coinc_event_ids, contents.scn_coinc_def_id)
		del progressbar

	#
	# Restore the original event order.
	#

	if verbose:
		print("finishing ...", file=sys.stderr)
	contents.sort_triggers_by_id()

	#
	# Done.
	#

	return xmldoc
Example #7
def make_db(db_filename, filenames):
    # Make the sqlite database if one does not exist
    connection = sqlite3.connect(db_filename)
    cursor = connection.cursor()
    # Create Table 1 that contains:
    # - the filenames of the hdf5 subbank files
    # - their respective ID integers
    cursor.execute("CREATE TABLE fname (filename_id INTEGER PRIMARY KEY, filename TEXT);")
    # Create Table 2 that contains:
    # - k (integers which will be populated later; this column gives the order in which overlaps are placed)
    # - ID integers of the filenames (instead of saving the entire filename to the database, to save memory)
    # - m1, m2, chi1, chi2 of the templates (a separate column for each)
    # - the row and column in which this overlap is found in the file (row=None means the overlaps are found in the columns only)
    cursor.execute("CREATE TABLE bank (k INTEGER, filename_id INTEGER, m1 REAL, m2 REAL, chi1 REAL, chi2 REAL, row INTEGER, column INTEGER);")
    # Populate Table 1:
    for filename in filenames:
        cursor.execute("INSERT INTO fname (filename) VALUES (?)", (filename,))
    connection.commit()
    cursor.execute("CREATE INDEX filename_index ON fname (filename)")
    progress = ProgressBar(max=len(filenames))
    # Populate Table 2:
    for filename in filenames:
        progress.increment(text=filename)
        try:
            f = h5py.File(filename, "r")
            subbank = f[list(f.keys())[0]]
            nrows = subbank['overlaps'].shape[0]  # number of rows in the subbank file
            for i, (m1, m2, chi1, chi2) in enumerate(zip(subbank['mass1'][:], subbank['mass2'][:], subbank['spin1z'][:], subbank['spin2z'][:])):
                cursor.execute("INSERT INTO bank (filename_id, m1, m2, chi1, chi2, row, column) VALUES ((SELECT filename_id FROM fname WHERE filename = ?),?,?,?,?,?,?)", (filename, m1, m2, chi1, chi2, i if i <= nrows else None, i))
                #FIXME: After building the database, confirm that each template only appears once in one row of the hdf5 files
        except Exception:  # ignore corrupted/broken subbank files
            print("Cannot load h5py file:", filename)
    cursor.execute("CREATE INDEX template_index ON bank (m1, m2, chi1, chi2)")
    cursor.execute("CREATE INDEX filename_id_index ON bank (filename_id)")
    cursor.execute("CREATE TEMPORARY TABLE template AS SELECT DISTINCT m1,m2,chi1,chi2 FROM bank;")
    cursor.execute("CREATE INDEX tmp ON template (m1,m2,chi1,chi2);")
    cursor.execute("UPDATE bank SET k = (SELECT rowid-(SELECT MIN(rowid) FROM template) FROM template WHERE m1 = bank.m1 and m2 = bank.m2 and chi1 = bank.chi1 and chi2 = bank.chi2);")  # populate column k
    cursor.execute("DROP TABLE template;")
    cursor.execute("CREATE INDEX k_index ON bank (k);")
    connection.commit()
    return connection
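
A hypothetical usage sketch (file names and query values are illustrative only): build the database once, then join the two tables to locate a template's overlaps.

import glob

connection = make_db("overlaps.sqlite", sorted(glob.glob("subbank_*.h5")))
cursor = connection.cursor()
# where do the overlaps for a given (m1, m2, chi1, chi2) template live?
cursor.execute("""
    SELECT fname.filename, bank.row, bank.column, bank.k
    FROM bank JOIN fname ON bank.filename_id = fname.filename_id
    WHERE bank.m1 = ? AND bank.m2 = ? AND bank.chi1 = ? AND bank.chi2 = ?
""", (1.4, 1.4, 0.0, 0.0))
print(cursor.fetchall())
connection.close()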
Example #9
def is_healthy(self, verbose=False):
    # do we believe the PDFs are sufficiently well-defined to
    # compute ln L?  not healthy until at least one instrument
    # in the analysis has produced triggers, and until all that
    # have produced triggers have each produced at least 10
    # million.
    # NOTE:  this will go badly if a detector that has never
    # produced triggers, say because it joins an observing run
    # late, suddenly starts producing triggers between snapshot
    # cycles of an online analysis.  we're assuming, here, that
    # detectors join science runs not at random times, but at
    # scheduled times, say, during maintenance breaks, and that
    # the analysis will not be collecting any candidates for
    # approximately one snapshot interval around the addition
    # of the new detector.
    nonzero_counts = [
        count for count in self.denominator.triggerrates.counts.values()
        if count
    ]
    health = 0. if not nonzero_counts else min(nonzero_counts) / 10000000.
    if verbose:
        ProgressBar(text="ranking stat. health", value=health).show()
    return health >= 1.
Example #10
for n, filename in enumerate(filenames, 1):
    if options.verbose:
        print("%d/%d: loading %s ..." % (n, len(filenames), filename),
              file=sys.stderr)
    img = Image.open(filename)

    width, height = img.size
    width, height = int(round(width * options.height /
                              float(height))), options.height
    if options.verbose:
        print("converting to %dx%d grayscale ... " % (width, height),
              file=sys.stderr)
    img = img.resize((width, height)).convert("L")

    progress = ProgressBar("computing pixels", max=width *
                           height) if options.verbose else None
    for i in range(width):
        for j in range(height):
            if progress is not None:
                progress.increment()
            # amplitude.  hrss column is ignored by waveform
            # generation code.  it is included for convenience,
            # to record the desired pixel brightness.  because
            # band- and time-limited white-noise burst
            # waveforms are random, the waveform's final
            # amplitude (in the egw_over_rsquared column) is
            # determined by generating the burst at a canonical
            # amplitude, measuring its hrss, then rescaling to
            # achieve the desired value.  this process requires
            # the final sample rate to be known.
            hrss = options.hrss_scale * img.getpixel(
Example #11
    help='Name of input file generated by bayestar_aggregate_found_injections')
opts = parser.parse_args()

# Imports.
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
from matplotlib import rcParams
import os
from distutils.dir_util import mkpath
import numpy as np
from glue.text_progress_bar import ProgressBar
from lalinference import plot

# Create progress bar.
pb = ProgressBar()
pb.update(-1, 'reading data')

# Read in all of the datasets listed as positional command line arguments.
datasets_ = [
    np.recfromtxt(file, names=True, usemask=True) for file in opts.input
]
dataset_names = [os.path.splitext(file.name)[0] for file in opts.input]

# For each of the quantities that we are going to plot, find their range
# over all of the datasets.
combined = np.concatenate([dataset['searched_area'] for dataset in datasets_])
min_searched_area = np.min(combined)
max_searched_area = np.max(combined)
have_offset = all('offset' in dataset.dtype.names for dataset in datasets_)
have_runtime = all('runtime' in dataset.dtype.names for dataset in datasets_)
Example #12
        far = float('nan')
    distmean = metadata.get('distmean', float('nan'))
    diststd = metadata.get('diststd', float('nan'))

    ret = [
        coinc_event_id, simulation_id, far, snr, searched_area, searched_prob,
        offset, runtime, distmean, diststd
    ] + contour_areas + area_probs
    if modes:
        ret += [searched_modes] + contour_modes
    return ret


if __name__ == '__main__':
    from glue.text_progress_bar import ProgressBar
    progress = ProgressBar()

    db = opts.db
    contours = opts.contour
    modes = opts.modes
    areas = opts.area

    progress.update(-1, 'spawning workers')
    if opts.jobs == 1:
        from six.moves import map
    else:
        try:
            from emcee.interruptible_pool import InterruptiblePool as Pool
        except ImportError:
            from multiprocessing import Pool
        map = Pool(opts.jobs, startup, (command.sqlite_get_filename(db),
Example #13
    if snr is None:
        snr = float('nan')
    if far is None:
        far = float('nan')
    distmean = metadata.get('distmean', float('nan'))
    diststd = metadata.get('diststd', float('nan'))

    ret = [coinc_event_id, simulation_id, far, snr, searched_area, searched_prob, offset, runtime, distmean, diststd] + contour_areas + area_probs
    if modes:
        ret += [searched_modes] + contour_modes
    return ret


if __name__ == '__main__':
    from glue.text_progress_bar import ProgressBar
    progress = ProgressBar()

    db = opts.db
    contours = opts.contour
    modes = opts.modes
    areas = opts.area

    progress.update(-1, 'spawning workers')
    if opts.jobs == 1:
        from six.moves import map
    else:
        try:
            from emcee.interruptible_pool import InterruptiblePool as Pool
        except ImportError:
            from multiprocessing import Pool
        map = Pool(
Example #14
import glue.lal
import lal
import lal.series
import lalsimulation
from glue.text_progress_bar import ProgressBar

# BAYESTAR imports.
from lalinference.bayestar import ligolw as ligolw_bayestar
from lalinference.bayestar import filter
from lalinference.bayestar import timing

# Other imports.
import numpy as np


progress = ProgressBar()

# Open output file.
progress.update(-1, 'setting up output document')
out_xmldoc = ligolw.Document()
out_xmldoc.appendChild(ligolw.LIGO_LW())

# Write process metadata to output file.
process = command.register_to_xmldoc(
    out_xmldoc, parser, opts, ifos=opts.detector,
    comment="Simulated coincidences")

# Add search summary to output file.
all_time = segments.segment(
    [glue.lal.LIGOTimeGPS(0), glue.lal.LIGOTimeGPS(2e9)])
search_summary_table = lsctables.New(lsctables.SearchSummaryTable)
Example #15
rho.sort()
rho = np.insert(rho, 0, 0)

ln_p_j, p_j_templates, p_j_indices = find_ln_p_j_voronoi(
    mass, f, popt
)  #p_j_indices = indices of the templates for m1, m2 arrays (eg. p_j_indices[0] = 1, which means p_j_template[0] corresponds to the 1th template in m1, m2)
t_k = np.array(range(len(mass)))
ln_p_jk = np.log(np.zeros(
    (len(rho), len(t_k))))  # P(signal t_j is recovered by template t_k)

#p_j_indices.tolist().sort(key=ln_p_j.__getitem__) # doing loop in order of p_j (smallest to largest) for numerical accuracy
order = np.argsort(ln_p_j)
ln_p_j, p_j_templates, p_j_indices = ln_p_j[order], p_j_templates[
    order], p_j_indices[order]

progress = ProgressBar(max=len(p_j_indices))
for i in range(len(p_j_indices)):  # loop over all signal population
    progress.increment(text=str(p_j_templates[order][i]))
    ovrlp = overlap[p_j_indices[i]]
    for r in range(len(rho)):  # loop over all rho
        ln_p_jk[r, :] = np.logaddexp(
            ln_p_jk[r, :], ln_p_j[order][i] + ln_p_k(ovrlp, rho[r], t_k))

print "ln(P_jk) computed for all templates. Time elapsed:", time.time(
) - start_time

# Save data to hdf5 file
if save_data:
    directory = ""
    filename = "logP_vs_rho_" + datetime
    f = h5py.File(directory + filename + ".hdf5", "w")
Example #16
import glue.lal
import lal
import lal.series
import lalsimulation
from lalinspiral.thinca import InspiralCoincDef
from glue.text_progress_bar import ProgressBar

# BAYESTAR imports.
from lalinference.bayestar import ligolw as ligolw_bayestar
from lalinference.bayestar import filter
from lalinference.bayestar import timing

# Other imports.
import numpy as np

progress = ProgressBar()

# Open output file.
progress.update(-1, 'setting up output document')
out_xmldoc = ligolw.Document()
out_xmldoc.appendChild(ligolw.LIGO_LW())

# Write process metadata to output file.
process = command.register_to_xmldoc(out_xmldoc,
                                     parser,
                                     opts,
                                     ifos=opts.detector,
                                     comment="Simulated coincidences")

# Add search summary to output file.
all_time = segments.segment(
Example #17
    help='optionally plot a posterior sample chain [default: none]')
parser.add_argument(
    '--projection', type=int, choices=list(range(4)), default=0,
    help='Plot one specific projection [default: plot all projections]')
parser.add_argument(
    'input', metavar='INPUT.fits[.gz]', type=argparse.FileType('rb'),
    default='-', nargs='?', help='Input FITS file [default: stdin]')
parser.add_argument(
    '--align-to', metavar='SKYMAP.fits[.gz]', type=argparse.FileType('rb'),
    help='Align to the principal axes of this sky map [default: input sky map]')
parser.set_defaults(figure_width='3.5', figure_height='3.5')
opts = parser.parse_args()

# Create progress bar.
from glue.text_progress_bar import ProgressBar
progress = ProgressBar()
progress.update(-1, 'Starting up')

# Late imports
from matplotlib import pyplot as plt
from matplotlib import gridspec
from matplotlib import transforms
from lalinference.io import fits
from lalinference.plot import marker
from lalinference.bayestar.distance import (
    principal_axes, volume_render, marginal_pdf)
import healpy as hp
import numpy as np
import scipy.stats

# Read input, determine input resolution.
Example #18
def is_healthy(self, verbose=False):
    if verbose:
        ProgressBar(text="ranking stat. health", value=1.).show()
    return True
Example #19
n = 10
k = np.arange(1,n+1)
rho = 30*np.cos((2*k-1)*np.pi/(2*n)) + 30 # min(rho)~0, max(rho)~80
rho.sort()
rho = np.insert(rho,0,0)

ln_p_j, p_j_templates, p_j_indices = find_ln_p_j_voronoi(mass, f, popt) #p_j_indices = indices of the templates for m1, m2 arrays (eg. p_j_indices[0] = 1, which means p_j_template[0] corresponds to the 1th template in m1, m2)
t_k = np.array(range(len(mass)))
ln_p_jk = np.log(np.zeros((len(rho), len(t_k)))) # P(signal t_j is recovered by template t_k)

#p_j_indices.tolist().sort(key=ln_p_j.__getitem__) # doing loop in order of p_j (smallest to largest) for numerical accuracy
order = np.argsort(ln_p_j)
ln_p_j, p_j_templates, p_j_indices = ln_p_j[order], p_j_templates[order], p_j_indices[order]

progress = ProgressBar(max=len(p_j_indices))
for i in range(len(p_j_indices)): # loop over all signal population
    progress.increment(text=str(p_j_templates[order][i]))
    ovrlp = overlap[p_j_indices[i]]
    for r in range(len(rho)): # loop over all rho
        ln_p_jk[r,:] = np.logaddexp(ln_p_jk[r,:], ln_p_j[order][i]+ln_p_k(ovrlp, rho[r], t_k))
        
print "ln(P_jk) computed for all templates. Time elapsed:", time.time()-start_time

# Save data to hdf5 file
if save_data:
    directory = ""
    filename = "logP_vs_rho_"+datetime
    f = h5py.File(directory+filename+".hdf5","w")
    f.create_dataset('rho', data=rho)
    f.create_dataset('ln_P_jk', data=ln_p_jk)
Example #20
    help='Name of input file generated by bayestar_aggregate_found_injections')
opts = parser.parse_args()

# Imports.
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
from matplotlib import rcParams
import os
from distutils.dir_util import mkpath
import numpy as np
from glue.text_progress_bar import ProgressBar
from lalinference import plot

# Create progress bar.
pb = ProgressBar()
pb.update(-1, 'reading data')

# Read in all of the datasets listed as positional command line arguments.
datasets_ = [np.recfromtxt(file, names=True, usemask=True) for file in opts.input]
dataset_names = [os.path.splitext(file.name)[0] for file in opts.input]

# For each of the quantities that we are going to plot, find their range
# over all of the datasets.
combined = np.concatenate([dataset['searched_area'] for dataset in datasets_])
min_searched_area = np.min(combined)
max_searched_area = np.max(combined)
have_offset = all('offset' in dataset.dtype.names for dataset in datasets_)
have_runtime = all('runtime' in dataset.dtype.names for dataset in datasets_)
have_searched_prob_dist = all('searched_prob_dist' in dataset.dtype.names for dataset in datasets_)
have_searched_prob_vol = all('searched_prob_vol' in dataset.dtype.names for dataset in datasets_)
Example #21
# Late imports

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import healpy as hp
import lal
from lalinference.io import fits
from lalinference import plot
from glue.text_progress_bar import ProgressBar

fig = plt.figure(frameon=False)
ax = plt.axes(projection='mollweide')
ax.grid()

progress = ProgressBar()

progress.update(-1, 'obtaining filenames of sky maps')
fitsfilenames = tuple(command.chainglob(opts.fitsfileglobs))

progress.max = len(fitsfilenames)

matplotlib.rc('path', simplify=True, simplify_threshold=1)

if opts.colormap is None:
    colors = ['k'] * len(fitsfilenames)
else:
    colors = matplotlib.cm.get_cmap(opts.colormap)
    colors = colors(np.linspace(0, 1, len(fitsfilenames)))
for count_records, (color,
                    fitsfilename) in enumerate(zip(colors, fitsfilenames)):
Example #22
        areas=areas, modes=modes, nest=metadata['nest'])

    if snr is None:
        snr = float('nan')
    if far is None:
        far = float('nan')

    ret = [coinc_event_id, simulation_id, far, snr, searched_area, searched_prob, offset, runtime] + contour_areas + area_probs
    if modes:
        ret += [searched_modes] + contour_modes
    return ret


if __name__ == '__main__':
    from glue.text_progress_bar import ProgressBar
    progress = ProgressBar()

    progress.update(-1, 'spawning {0} workers'.format(opts.jobs))
    startupargs = (dbfilename, opts.contour, opts.modes, opts.area)
    if opts.jobs == 1:
        from itertools import imap
    else:
        import multiprocessing
        imap = multiprocessing.Pool(opts.jobs, startup, startupargs).imap_unordered
    startup(*startupargs)

    progress.update(-1, 'obtaining filenames of sky maps')
    fitsfilenames = tuple(command.chainglob(fitsfileglobs))

    colnames = ['coinc_event_id', 'simulation_id', 'far', 'snr', 'searched_area',
        'searched_prob', 'offset', 'runtime'] + ["area({0:g})".format(p)
Example #23
    diststd = sky_map.meta.get('diststd', np.nan)
    log_bci = sky_map.meta.get('log_bci', np.nan)
    log_bsn = sky_map.meta.get('log_bsn', np.nan)

    ret = [coinc_event_id, simulation_id, far, snr, searched_area,
           searched_prob, searched_prob_dist, searched_vol, searched_prob_vol,
           offset, runtime, distmean, diststd, log_bci, log_bsn] \
          + contour_areas + area_probs + contour_dists + contour_vols
    if modes:
        ret += [searched_modes] + contour_modes
    return ret


if __name__ == '__main__':
    from glue.text_progress_bar import ProgressBar
    progress = ProgressBar()

    db = opts.db
    contours = opts.contour
    modes = opts.modes
    areas = opts.area

    progress.update(-1, 'spawning workers')
    if opts.jobs == 1:
        from six.moves import map
    else:
        try:
            from emcee.interruptible_pool import InterruptiblePool as Pool
        except ImportError:
            from multiprocessing import Pool
        map = Pool(
Example #24
def main(args=None):
    opts = parser().parse_args(args)

    # Create progress bar.
    from glue.text_progress_bar import ProgressBar
    progress = ProgressBar()
    progress.update(-1, 'Starting up')

    # Late imports
    from matplotlib import pyplot as plt
    from matplotlib import gridspec
    from matplotlib import transforms
    from .. import io
    from ..plot import marker
    from ..distance import (parameters_to_marginal_moments, principal_axes,
                            volume_render, marginal_pdf)
    import healpy as hp
    import numpy as np
    import scipy.stats
    import seaborn

    # Read input, determine input resolution.
    progress.update(-1, 'Loading FITS file')
    (prob, mu, sigma, norm), metadata = io.read_sky_map(opts.input.name,
                                                        distances=True)
    npix = len(prob)
    nside = hp.npix2nside(npix)

    progress.update(-1, 'Preparing projection')

    if opts.align_to is None or opts.input.name == opts.align_to.name:
        prob2, mu2, sigma2, norm2 = prob, mu, sigma, norm
    else:
        (prob2, mu2, sigma2, norm2), _ = io.read_sky_map(opts.align_to.name,
                                                         distances=True)
    if opts.max_distance is None:
        mean, std = parameters_to_marginal_moments(prob2, mu2, sigma2)
        max_distance = mean + 2.5 * std
    else:
        max_distance = opts.max_distance
    rot = np.ascontiguousarray(principal_axes(prob2, mu2, sigma2))

    if opts.chain:
        chain = io.read_samples(opts.chain.name)
        chain = np.dot(rot.T,
                       (hp.ang2vec(0.5 * np.pi - chain['dec'], chain['ra']) *
                        np.atleast_2d(chain['dist']).T).T)

    fig = plt.figure(frameon=False)
    n = 1 if opts.projection else 2
    gs = gridspec.GridSpec(n,
                           n,
                           left=0.01,
                           right=0.99,
                           bottom=0.01,
                           top=0.99,
                           wspace=0.05,
                           hspace=0.05)

    imgwidth = int(opts.dpi * opts.figure_width / n)
    s = np.linspace(-max_distance, max_distance, imgwidth)
    xx, yy = np.meshgrid(s, s)

    # Color palette for markers
    colors = seaborn.color_palette(n_colors=len(opts.radecdist) + 1)

    truth_marker = marker.reticle(inner=0.5 * np.sqrt(2),
                                  outer=1.5 * np.sqrt(2),
                                  angle=45)

    for iface, (axis0, axis1, (sp0, sp1)) in enumerate((
        (1, 0, [0, 0]),
        (0, 2, [1, 1]),
        (1, 2, [1, 0]),
    )):

        if opts.projection and opts.projection != iface + 1:
            continue

        progress.update(text='Plotting projection {0}'.format(iface + 1))

        # Marginalize onto the given face
        density = volume_render(xx.ravel(), yy.ravel(), max_distance, axis0,
                                axis1, rot, False, prob, mu, sigma,
                                norm).reshape(xx.shape)

        # Plot heat map
        ax = fig.add_subplot(gs[0, 0] if opts.projection else gs[sp0, sp1],
                             aspect=1)
        ax.imshow(
            density,
            origin='lower',
            extent=[-max_distance, max_distance, -max_distance, max_distance],
            cmap=opts.colormap)

        # Add contours if requested
        if opts.contour:
            flattened_density = density.ravel()
            indices = np.argsort(flattened_density)[::-1]
            cumsum = np.empty_like(flattened_density)
            cs = np.cumsum(flattened_density[indices])
            cumsum[indices] = cs / cs[-1] * 100
            cumsum = np.reshape(cumsum, density.shape)
            u, v = np.meshgrid(s, s)
            contourset = ax.contour(u,
                                    v,
                                    cumsum,
                                    levels=opts.contour,
                                    linewidths=0.5)

        # Mark locations
        for (ra, dec, dist), color in zip(opts.radecdist, colors[1:]):
            theta = 0.5 * np.pi - np.deg2rad(dec)
            phi = np.deg2rad(ra)
            xyz = np.dot(rot.T, hp.ang2vec(theta, phi) * dist)
            ax.plot(xyz[axis0],
                    xyz[axis1],
                    marker=truth_marker,
                    markeredgecolor=color,
                    markerfacecolor='none',
                    markeredgewidth=1)

        # Plot chain
        if opts.chain:
            ax.plot(chain[axis0], chain[axis1], '.k', markersize=0.5)

        # Hide axes ticks
        ax.set_xticks([])
        ax.set_yticks([])

        # Set axis limits
        ax.set_xlim([-max_distance, max_distance])
        ax.set_ylim([-max_distance, max_distance])

        # Mark origin (Earth)
        ax.plot([0], [0],
                marker=marker.earth,
                markersize=5,
                markerfacecolor='none',
                markeredgecolor='black',
                markeredgewidth=0.75)

        if iface == 2:
            ax.invert_xaxis()

    # Add contour labels if contours requested
    if opts.contour:
        ax.clabel(contourset, fmt='%d%%', fontsize=7)

    if not opts.projection:
        # Add scale bar, 1/4 width of the plot
        ax.plot([0.0625, 0.3125], [0.0625, 0.0625],
                color='black',
                linewidth=1,
                transform=ax.transAxes)
        ax.text(0.0625,
                0.0625,
                '{0:d} Mpc'.format(int(np.round(0.5 * max_distance))),
                fontsize=8,
                transform=ax.transAxes,
                verticalalignment='bottom')

        # Create marginal distance plot.
        progress.update(-1, 'Plotting distance')
        gs1 = gridspec.GridSpecFromSubplotSpec(5, 5, gs[0, 1])
        ax = fig.add_subplot(gs1[1:-1, 1:-1])

        # Plot marginal distance distribution, integrated over the whole sky.
        d = np.linspace(0, max_distance)
        ax.fill_between(d,
                        marginal_pdf(d, prob, mu, sigma, norm),
                        alpha=0.5,
                        color=colors[0])

        # Plot conditional distance distribution at true position
        # and mark true distance.
        for (ra, dec, dist), color in zip(opts.radecdist, colors[1:]):
            theta = 0.5 * np.pi - np.deg2rad(dec)
            phi = np.deg2rad(ra)
            ipix = hp.ang2pix(nside, theta, phi)
            ax.fill_between(d,
                            scipy.stats.norm(mu[ipix], sigma[ipix]).pdf(d) *
                            norm[ipix] * np.square(d),
                            alpha=0.5,
                            color=color)
            ax.axvline(dist, color='black', linewidth=0.5)
            ax.plot([dist], [-0.15],
                    marker=truth_marker,
                    markeredgecolor=color,
                    markerfacecolor='none',
                    markeredgewidth=1,
                    clip_on=False,
                    transform=transforms.blended_transform_factory(
                        ax.transData, ax.transAxes))
            ax.axvline(dist, color='black', linewidth=0.5)

        # Scale axes
        ax.set_xticks([0, max_distance])
        ax.set_xticklabels(
            ['0', "{0:d}\nMpc".format(int(np.round(max_distance)))],
            fontsize=9)
        ax.set_yticks([])
        ax.set_xlim(0, max_distance)
        ax.set_ylim(0, ax.get_ylim()[1])

        if opts.annotate:
            text = []
            try:
                objid = metadata['objid']
            except KeyError:
                pass
            else:
                text.append('event ID: {}'.format(objid))
            try:
                distmean = metadata['distmean']
                diststd = metadata['diststd']
            except KeyError:
                pass
            else:
                text.append(u'distance: {}±{} Mpc'.format(
                    int(np.round(distmean)), int(np.round(diststd))))
            ax.text(0,
                    1,
                    '\n'.join(text),
                    transform=ax.transAxes,
                    fontsize=7,
                    ha='left',
                    va='bottom',
                    clip_on=False)

    progress.update(-1, 'Saving')
    opts.output()
Example #25
def cluster_events(events, testfunc, clusterfunc, sortfunc = None, bailoutfunc = None, verbose = False):
	"""
	Cluster the events in an event list.  testfunc will be passed a
	pair of events in random order, and must return 0 (or False) if
	they should be clustered.  clusterfunc will be passed a pair of
	events in random order, and must return an event that is the
	"cluster" of the two.  clusterfunc is free to return a new events,
	or modify one or the other of its parameters in place and return
	it.

	If sortfunc and bailoutfunc are both not None (if one is provided
	the other must be as well), the events will be sorted into
	"increasing" order using sortfunc as a comparison operator, and
	then only pairs of events for which bailoutfunc returns 0 (or
	False) will be considered for clustering.

	The return value is True if the events in the event list were
	modified, and False if they were not (although their order might
	have changed).
	"""
	# changed indicates if the event list has changed
	changed = False
	while True:
		if verbose:
			progress = ProgressBar("clustering %d events" % len(events), max = len(events))
			progress.show()
		else:
			progress = None

		if sortfunc is not None:
			# sortfunc is a cmp-style comparator; adapt for Python 3
			events.sort(key = functools.cmp_to_key(sortfunc))

		# outer_did_cluster indicates if the event list changes on
		# this pass
		outer_did_cluster = False
		i = 0
		while i < len(events):
			if progress is not None:
				progress.update(i)
			if events[i] is not None:
				# inner_did_cluster indicates if events[i]
				# has changed
				inner_did_cluster = False
				for j, event_j in enumerate(events[i + 1:], 1):
					if event_j is not None:
						if not testfunc(events[i], event_j):
							events[i] = clusterfunc(events[i], event_j)
							events[i + j] = None
							inner_did_cluster = True
						elif (sortfunc is not None) and bailoutfunc(events[i], event_j):
							break
				if inner_did_cluster:
					outer_did_cluster = True
					# don't advance until events[i]
					# stops changing
					continue
			# events[i] has not changed
			i += 1
		del progress
		# repeat until we do a pass without the listing changing
		if not outer_did_cluster:
			break
		iterutils.inplace_filter(lambda event: event is not None, events)
		changed = True
	return changed
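
A toy invocation to make the callback contract concrete; the event type and merge rule are hypothetical (plain floats clustered by proximity), and the module-level imports the excerpt relies on (ProgressBar, iterutils) are assumed.

events = [1.0, 1.1, 5.0, 5.05, 9.7]
testfunc = lambda a, b: abs(a - b) >= 0.5  # 0/False means "cluster these two"
clusterfunc = lambda a, b: 0.5 * (a + b)   # merge a pair into its midpoint

if cluster_events(events, testfunc, clusterfunc):
    print(events)  # -> [1.05, 5.025, 9.7]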
Example #26
    ]).parse_args()

# Imports.
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
from matplotlib import rcParams
import scipy.stats
import os
import subprocess
import numpy as np
from glue.text_progress_bar import ProgressBar
import lalinference.plot

# Create progress bar.
pb = ProgressBar()
pb.update(-1, 'reading data')

# Read in all of the datasets listed as positional command line arguments.
datasets_ = [np.recfromtxt(arg, names=True, usemask=True) for arg in args]
dataset_names = [os.path.splitext(arg)[0] for arg in args]

# For each of the quantities that we are going to plot, find their range
# over all of the datasets.
combined = np.concatenate([dataset['searched_area'] for dataset in datasets_])
min_searched_area = np.min(combined)
max_searched_area = np.max(combined)
combined = np.concatenate([dataset['offset'] for dataset in datasets_])
min_offset = np.min(combined)
max_offset = np.max(combined)
combined = np.concatenate([dataset['runtime'] for dataset in datasets_])
Example #27
    help='Name of input file generated by bayestar_aggregate_found_injections')
opts = parser.parse_args()

# Imports.
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
from matplotlib import rcParams
import os
from distutils.dir_util import mkpath
import numpy as np
from glue.text_progress_bar import ProgressBar
from lalinference import plot

# Create progress bar.
pb = ProgressBar()
pb.update(-1, 'reading data')

# Read in all of the datasets listed as positional command line arguments.
datasets_ = [np.recfromtxt(file, names=True, usemask=True) for file in opts.input]
dataset_names = [os.path.splitext(file.name)[0] for file in opts.input]

# For each of the quantities that we are going to plot, find their range
# over all of the datasets.
combined = np.concatenate([dataset['searched_area'] for dataset in datasets_])
min_searched_area = np.min(combined)
max_searched_area = np.max(combined)
combined = np.concatenate([dataset['offset'] for dataset in datasets_])
min_offset = np.min(combined)
max_offset = np.max(combined)
combined = np.concatenate([dataset['runtime'] for dataset in datasets_])
Example #28
# Late imports

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import healpy as hp
import lal
from lalinference.io import fits
from lalinference import plot
from glue.text_progress_bar import ProgressBar

fig = plt.figure(frameon=False)
ax = plt.axes(projection='mollweide')
ax.grid()

progress = ProgressBar()

progress.max = len(opts.fitsfilenames)

matplotlib.rc('path', simplify=True, simplify_threshold=1)

if opts.colormap is None:
    colors = ['k'] * len(opts.fitsfilenames)
else:
    colors = matplotlib.cm.get_cmap(opts.colormap)
    colors = colors(np.linspace(0, 1, len(opts.fitsfilenames)))
for count_records, (color,
                    fitsfilename) in enumerate(zip(colors,
                                                   opts.fitsfilenames)):
    progress.update(count_records, fitsfilename)
    skymap, metadata = fits.read_sky_map(fitsfilename, nest=None)
Example #29
# FIXME: Determine appropriate boundaries to avoid looping over lots of points
# that we are going to skip.
#
# T. Cokelaer (2007, http://dx.doi.org/10.1103/PhysRevD.76.102004) describes
# relationships between the component mass limits and the (tau0, tau3)
# boundaries.
n = 800
i0, i1 = np.mgrid[-n:n + 1, -n:n + 1]
i = np.column_stack((i0.ravel(), i1.ravel()))

# FIXME: Come up with a more natural way to specify the template spacing.
skip = 10
theta0_theta3 = np.dot(i, skip * delta_theta0_theta3.T) + initial_theta0_theta3

for th0, th3 in ProgressBar(theta0_theta3.tolist()):

    th3S = 0
    tau0 = th0 / (2 * np.pi * f_low)
    tau3 = -th3 / (2 * np.pi * f_low)

    mchirp, eta, chi = lalsimulation.SimInspiralTaylorF2RedSpinMchirpEtaChiFromChirpTimes(
        th0, th3, th3S, f_low)

    # Skip if either mchirp, eta, or chi are unphysical, unless this is the
    # initial point, which may be slightly unphysical just due to roundoff
    if np.all([th0, th3] == initial_theta0_theta3):
        mchirp = initial_mchirp
        eta = initial_eta
        mass1 = opts.initial_mass1
        mass2 = opts.initial_mass2
Example #30
import functools
import numpy as np
import matplotlib.pyplot as plt
import healpy as hp
import lal
from lalinference import fits
from lalinference import plot
from glue.text_progress_bar import ProgressBar


fig = plt.figure(figsize=(opts.figure_width, opts.figure_height), frameon=False)
ax = plt.subplot(111, projection='mollweide')
ax.cla()
ax.grid()

progress = ProgressBar()

progress.update(-1, 'obtaining filenames of sky maps')
fitsfilenames = tuple(command.chainglob(args))

progress.max = len(fitsfilenames)

matplotlib.rc('path', simplify=True, simplify_threshold=1)

for count_records, fitsfilename in enumerate(fitsfilenames):
    progress.update(count_records, fitsfilename)
    skymap, metadata = fits.read_sky_map(fitsfilename, nest=None)
    nside = hp.npix2nside(len(skymap))
    gmst = lal.GreenwichMeanSiderealTime(metadata['gps_time']) % (2*np.pi)

    indices = np.argsort(-skymap)
Example #31
    help='optionally plot a posterior sample chain [default: none]')
parser.add_argument(
    '--projection', type=int, choices=list(range(4)), default=0,
    help='Plot one specific projection [default: plot all projections]')
parser.add_argument(
    'input', metavar='INPUT.fits[.gz]', type=argparse.FileType('rb'),
    default='-', nargs='?', help='Input FITS file [default: stdin]')
parser.add_argument(
    '--align-to', metavar='SKYMAP.fits[.gz]', type=argparse.FileType('rb'),
    help='Align to the principal axes of this sky map [default: input sky map]')
parser.set_defaults(figure_width='3.5', figure_height='3.5')
opts = parser.parse_args()

# Create progress bar.
from glue.text_progress_bar import ProgressBar
progress = ProgressBar()
progress.update(-1, 'Starting up')

# Late imports
from matplotlib import pyplot as plt
from matplotlib import gridspec
from matplotlib import transforms
from lalinference import io
from lalinference.plot import marker
from lalinference.bayestar.distance import (
    principal_axes, volume_render, marginal_pdf, marginal_ppf)
import healpy as hp
import numpy as np
import scipy.stats

# Read input, determine input resolution.
Example #32
    diststd = sky_map.meta.get('diststd', np.nan)
    log_bci = sky_map.meta.get('log_bci', np.nan)
    log_bsn = sky_map.meta.get('log_bsn', np.nan)

    ret = [coinc_event_id, simulation_id, far, snr, searched_area,
           searched_prob, searched_prob_dist, searched_vol, searched_prob_vol,
           offset, runtime, distmean, diststd, log_bci, log_bsn] \
          + contour_areas + area_probs + contour_dists + contour_vols
    if modes:
        ret += [searched_modes] + contour_modes
    return ret


if __name__ == '__main__':
    from glue.text_progress_bar import ProgressBar
    progress = ProgressBar()

    db = opts.db
    contours = opts.contour
    modes = opts.modes
    areas = opts.area

    progress.update(-1, 'spawning workers')
    if opts.jobs == 1:
        from six.moves import map
    else:
        try:
            from emcee.interruptible_pool import InterruptiblePool as Pool
        except ImportError:
            from multiprocessing import Pool
        map = Pool(
Example #33
    if snr is None:
        snr = float("nan")
    if far is None:
        far = float("nan")

    ret = [coinc_event_id, simulation_id, far, snr, searched_area, searched_prob, offset, runtime] + contour_areas
    if modes:
        ret += [searched_modes] + contour_modes
    return ret


if __name__ == "__main__":
    from glue.text_progress_bar import ProgressBar

    progress = ProgressBar()

    progress.update(-1, "spawning {0} workers".format(opts.jobs))
    startupargs = (dbfilename, opts.contour, opts.modes)
    if opts.jobs == 1:
        from itertools import imap
    else:
        import multiprocessing

        imap = multiprocessing.Pool(opts.jobs, startup, startupargs).imap_unordered
    startup(*startupargs)

    progress.update(-1, "obtaining filenames of sky maps")
    fitsfilenames = tuple(command.chainglob(fitsfileglobs))

    colnames = [
Example #34
def cluster_events(events,
                   testfunc,
                   clusterfunc,
                   sortfunc=None,
                   bailoutfunc=None,
                   verbose=False):
    """
	Cluster the events in an event list.  testfunc will be passed a
	pair of events in random order, and must return 0 (or False) if
	they should be clustered.  clusterfunc will be passed a pair of
	events in random order, and must return an event that is the
	"cluster" of the two.  clusterfunc is free to return a new events,
	or modify one or the other of its parameters in place and return
	it.

	If sortfunc and bailoutfunc are both not None (if one is provided
	the other must be as well), the events will be sorted into
	"increasing" order using sortfunc as a comparison operator, and
	then only pairs of events for which bailoutfunc returns 0 (or
	False) will be considered for clustering.

	The return value is True if the events in the event list were
	modified, and False if they were not (although their order might
	have changed).
	"""
    # changed indicates if the event list has changed
    changed = False
    while True:
        if verbose:
            progress = ProgressBar("clustering %d events" % len(events),
                                   max=len(events))
            progress.show()
        else:
            progress = None

        if sortfunc is not None:
            # sortfunc is a cmp-style comparator; adapt for Python 3
            events.sort(key=functools.cmp_to_key(sortfunc))

        # outer_did_cluster indicates if the event list changes on
        # this pass
        outer_did_cluster = False
        i = 0
        while i < len(events):
            if progress is not None:
                progress.update(i)
            if events[i] is not None:
                # inner_did_cluster indicates if events[i]
                # has changed
                inner_did_cluster = False
                for j, event_j in enumerate(events[i + 1:], 1):
                    if event_j is not None:
                        if not testfunc(events[i], event_j):
                            events[i] = clusterfunc(events[i], event_j)
                            events[i + j] = None
                            inner_did_cluster = True
                        elif (sortfunc is not None) and bailoutfunc(
                                events[i], event_j):
                            break
                if inner_did_cluster:
                    outer_did_cluster = True
                    # don't advance until events[i]
                    # stops changing
                    continue
            # events[i] has not changed
            i += 1
        del progress
        # repeat until we do a pass without the listing changing
        if not outer_did_cluster:
            break
        iterutils.inplace_filter(lambda event: event is not None, events)
        changed = True
    return changed