Example #1
def injections_from_cli(opts):
    """Gets injection parameters from the inference file(s).

    Parameters
    ----------
    opts : argparse.Namespace
        Namespace object holding the parsed command-line options.

    Returns
    -------
    FieldArray
        Array of the injection parameters from all of the input files given
        by ``opts.input_file``.
    """
    input_files = opts.input_file
    if isinstance(input_files, str):
        input_files = [input_files]
    parameters, _ = parse_parameters_opt(opts.parameters)
    if parameters is None:
        with InferenceFile(input_files[0], 'r') as fp:
            parameters = fp.variable_args
    injections = None
    # loop over the input files, reading the injections from each
    for input_file in input_files:
        # read injections from HDF input file as FieldArray
        these_injs = inject.InjectionSet(
            input_file,
            hdf_group=opts.injection_hdf_group).table.view(FieldArray)
        if injections is None:
            injections = these_injs
        else:
            injections = injections.append(these_injs)
    # check if extra parameters are needed beyond those stored in the
    # injection file
    _, ts = transforms.get_common_cbc_transforms(parameters,
                                                 injections.fieldnames)
    # add parameters not included in injection file
    injections = transforms.apply_transforms(injections, ts)
    return injections
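
A minimal usage sketch for `injections_from_cli` (not from the original source): the attribute names on `opts` mirror those the function reads, and the file name is a placeholder.

# Hypothetical usage sketch -- attribute names follow the function body above;
# the file name is a placeholder, not a real output file.
import argparse

opts = argparse.Namespace(
    input_file=["inference_output.hdf"],  # placeholder path(s)
    parameters=None,                      # fall back to the file's variable_args
    injection_hdf_group=None)             # default HDF group for InjectionSet
injections = injections_from_cli(opts)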
Example #2
import h5py
from matplotlib import rcParams
from pycbc.io import InferenceFile

rcParams['font.size'] = 16

# The data files store posteriors for the variable parameters (`variable_args`) in the MCMC. To check the parameter names stored in the posteriors file from the analysis of any of the events (say GW150914), open the appropriate file and do:

# In[2]:

fp = h5py.File("posteriors/GW150914/gw150914_posteriors_thinned.hdf", "r")
print(fp.attrs['variable_args'])
fp.close()

# Posteriors for the `variable_args` are stored inside the `samples` group of the HDF data files. PyCBC provides a handler class `pycbc.io.InferenceFile` that extends `h5py.File`. To read the output file and get the samples for any of the `variable_args`, say `mass1`, do:

# In[3]:

fp = InferenceFile("posteriors/GW150914/gw150914_posteriors_thinned.hdf", "r")
mass1_samples = fp['samples/mass1'][:]
print(mass1_samples)
fp.close()

# Using the PyCBC software, one can map the posteriors for the `variable_args` to posteriors of other parameters. PyCBC has several functions for general transforms between parameters; see `pycbc.conversions`, `pycbc.coordinates`, and `pycbc.cosmology` for the transforms available for the `variable_args`. For example, to obtain the chirp mass of the binary `mchirp` from the component masses `(mass1, mass2)`, one would do:

# In[4]:

from pycbc import conversions

fp = InferenceFile("posteriors/GW150914/gw150914_posteriors_thinned.hdf", 'r')
mass1 = fp['samples/mass1'][:]
mass2 = fp['samples/mass2'][:]
fp.close()

mchirp = conversions.mchirp_from_mass1_mass2(mass1, mass2)
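
# As a quick sanity check, one can histogram the derived chirp-mass samples.
# This is a minimal sketch (bin count arbitrary, assuming a recent matplotlib):

import matplotlib.pyplot as plt

plt.hist(mchirp, bins=50, histtype='step', density=True)
plt.xlabel('mchirp')
plt.ylabel('posterior density')
plt.show()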
Example #3
# Imports needed by this snippet; plotDir and greedy_kde_areas_2d are
# defined elsewhere in the original script.
import os

import numpy as np

from pycbc import conversions
from pycbc.io import InferenceFile

os.makedirs(plotDir)

n_live_points = 5000
evidence_tolerance = 0.5

filename = '../plots/fitting_gws/Ka2017_FixZPT0/u_g_r_i_z_y_J_H_K/0_14/joint/GW170817/1.00/0_640/q_lambdatilde.dat'
data = np.loadtxt(filename)
q_em = data[:, 0]
lambdatilde_em = data[:, 1]

filename = '/home/mcoughlin/gw170817-common-eos/uniform_mass_prior_common_eos_posteriors.hdf'
lambda1 = 'lambdasym*((mass2/mass1)**3)'
lambda2 = 'lambdasym*((mass1/mass2)**3)'
# read the samples; lambda1 and lambda2 are parameter-expression strings here
params = [lambda1, lambda2]
with InferenceFile(filename, "r") as fp:
    samples = fp.read_samples(params)
mass1 = samples['mass1'][:]
mass2 = samples['mass2'][:]
lambdasym = samples['lambdasym'][:]
# recompute lambda1/lambda2 as numeric arrays from the loaded fields
lambda1 = lambdasym * ((mass2 / mass1)**3)
lambda2 = lambdasym * ((mass1 / mass2)**3)
lambdatilde_gw = conversions.lambda_tilde(mass1, mass2, lambda1, lambda2)
q_gw = mass1 / mass2

pts_em = np.vstack((q_em, lambdatilde_em)).T
pts_gw = np.vstack((q_gw, lambdatilde_gw)).T
kdedir_em = greedy_kde_areas_2d(pts_em)
kdedir_gw = greedy_kde_areas_2d(pts_gw)

parameters = ["q", "lambdatilde"]
Example #4
def results_from_cli(opts, load_samples=True, **kwargs):
    """
    Loads an inference result file along with any labels associated with it
    from the command line options.

    Parameters
    ----------
    opts : argparse.Namespace
        Namespace object holding the parsed command-line options.
    load_samples : {True, bool}
        Load samples from the results file using the parameters, thin_start,
        and thin_interval specified in the options. The samples are returned
        as a FieldArray instance.

    \**kwargs :
        All other keyword arguments are passed to the InferenceFile's
        read_samples function.

    Returns
    -------
    fp_all : pycbc.io.InferenceFile
        The result file as an InferenceFile. If more than one input file,
        then it returns a list.
    parameters_all : list
        List of the parameters to use, parsed from the parameters option.
        If more than one input file, then it returns a list.
    labels_all : list
        List of labels to associate with the parameters. If more than one
        input file, then it returns a list.
    samples_all : {None, FieldArray}
        If load_samples, the samples as a FieldArray; otherwise, None.
        If more than one input file, then it returns a list.
    """

    # lists for files and samples from all input files
    fp_all = []
    parameters_all = []
    labels_all = []
    samples_all = []

    input_files = opts.input_file
    if isinstance(input_files, str):
        input_files = [input_files]

    # loop over all input files
    for input_file in input_files:
        logging.info("Reading input file %s", input_file)

        # read input file
        fp = InferenceFile(input_file, "r")

        # get parameters and a dict of labels for each parameter
        parameters = fp.variable_args if opts.parameters is None \
                         else opts.parameters
        parameters, ldict = parse_parameters_opt(parameters)

        # convert labels dict to list
        labels = []
        for p in parameters:
            try:
                label = ldict[p]
            except KeyError:
                label = fp.read_label(p)
            labels.append(label)

        # load the samples
        if load_samples:
            logging.info("Loading samples")

            # check if extra parameters are needed to compute any
            # non-sampling parameters
            file_parameters, ts = transforms.get_common_cbc_transforms(
                                                 parameters, fp.variable_args)

            # read samples from file
            samples = fp.read_samples(
                file_parameters, thin_start=opts.thin_start,
                thin_interval=opts.thin_interval, thin_end=opts.thin_end,
                iteration=opts.iteration,
                samples_group=opts.parameters_group, **kwargs)

            # add parameters not included in file
            samples = transforms.apply_transforms(samples, ts)

        # else do not read samples
        else:
            samples = None

        # add results to lists from all input files
        if len(input_files) > 1:
            fp_all.append(fp)
            parameters_all.append(parameters)
            labels_all.append(labels)
            samples_all.append(samples)

        # with a single input file, return bare values instead of lists
        else:
            fp_all = fp
            parameters_all = parameters
            labels_all = labels
            samples_all = samples

    return fp_all, parameters_all, labels_all, samples_all
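
A minimal, hypothetical call site for `results_from_cli`: the namespace carries every attribute the function reads, with the file name as a placeholder.

# Hypothetical usage sketch -- attribute names mirror the function body above.
import argparse

opts = argparse.Namespace(
    input_file="inference_output.hdf",  # placeholder path
    parameters=None,                    # fall back to the file's variable_args
    thin_start=None, thin_interval=None, thin_end=None,
    iteration=None, parameters_group=None)
fp, parameters, labels, samples = results_from_cli(opts)
fp.close()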
Example #5
def validate_checkpoint_files(checkpoint_file, backup_file):
    """Checks if the given checkpoint and/or backup files are valid.

    The checkpoint file is considered valid if:

        * it passes all tests run by ``InferenceFile.check_integrity``;
        * it has at least one sample written to it (indicating at least one
          checkpoint has happened).

    The same applies to the backup file. The backup file must also have the
    same number of samples as the checkpoint file, otherwise, the backup is
    considered invalid.

    If the checkpoint (backup) file is found to be valid, but the backup
    (checkpoint) file is not valid, then the checkpoint (backup) is copied to
    the backup (checkpoint). Thus, this function ensures that checkpoint and
    backup files are either both valid or both invalid.

    Parameters
    ----------
    checkpoint_file : string
        Name of the checkpoint file.
    backup_file : string
        Name of the backup file.

    Returns
    -------
    checkpoint_valid : bool
        Whether or not the checkpoint (and backup) file may be used for loading
        samples.
    """
    # check if checkpoint file exists and is valid
    logging.info("Validating checkpoint and backup files")
    try:
        check_integrity(checkpoint_file)
        checkpoint_valid = True
    except (ValueError, KeyError, IOError):
        checkpoint_valid = False
    # backup file
    try:
        check_integrity(backup_file)
        backup_valid = True
    except (ValueError, KeyError, IOError):
        backup_valid = False
    # check if there are any samples in the file; if not, we'll just start from
    # scratch
    if checkpoint_valid:
        with InferenceFile(checkpoint_file, 'r') as fp:
            try:
                group = '{}/{}'.format(fp.samples_group, fp.variable_args[0])
                nsamples = fp[group].size
                checkpoint_valid = nsamples != 0
            except KeyError:
                checkpoint_valid = False
    # check if there are any samples in the backup file
    if backup_valid:
        with InferenceFile(backup_file, 'r') as fp:
            try:
                group = '{}/{}'.format(fp.samples_group, fp.variable_args[0])
                backup_nsamples = fp[group].size
                backup_valid = backup_nsamples != 0
            except KeyError:
                backup_valid = False
    # check that the checkpoint and backup have the same number of samples;
    # if not, assume the checkpoint has the correct number
    if checkpoint_valid and backup_valid:
        backup_valid = nsamples == backup_nsamples
    # decide what to do based on the files' statuses
    if checkpoint_valid and not backup_valid:
        # copy the checkpoint to the backup
        logging.info("Backup invalid; copying checkpoint file")
        shutil.copy(checkpoint_file, backup_file)
        backup_valid = True
    elif backup_valid and not checkpoint_valid:
        logging.info("Checkpoint invalid; copying backup file")
        # copy the backup to the checkpoint
        shutil.copy(backup_file, checkpoint_file)
        checkpoint_valid = True
    return checkpoint_valid
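
A brief, hypothetical call site for the validator above; the file names are placeholders.

# Hypothetical usage sketch -- file names are placeholders.
can_resume = validate_checkpoint_files("run.hdf.checkpoint", "run.hdf.bkup")
if not can_resume:
    logging.info("No valid checkpoint; starting sampler from scratch")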