Example #1
def __init__(self, generator, variable_args=(), **frozen_params):
    super(BaseCBCGenerator, self).__init__(generator,
                                           variable_args=variable_args,
                                           **frozen_params)
    # decorate the generator function with a list of functions that convert
    # parameters to those used by the waveform generation interface
    all_args = set(
        list(self.frozen_params.keys()) + list(self.variable_args))
    # compare the set of all generator args to the input parameters of the
    # conversion functions and add any needed conversions to the list of
    # pregenerate functions
    params_used, cs = transforms.get_common_cbc_transforms(
        list(self.possible_args), variable_args)
    for c in cs:
        self._add_pregenerate(c)
    # check that there are no unused (non-calibration) parameters
    calib_args = set(
        [a for a in self.variable_args if a.startswith('calib_')])
    all_args = all_args - calib_args
    unused_args = all_args.difference(params_used) \
                          .difference(self.possible_args)
    if len(unused_args):
        logging.warning(
            "WARNING: The following args are not being used "
            "for waveform generation: %s", ', '.join(unused_args))
Example #2
    def __init__(self, variable_args, constraint_arg, **kwargs):
        self.constraint_arg = constraint_arg

        # set any given attributes and get transforms from variable_args
        # to required parameters
        for kwarg in kwargs.keys():
            setattr(self, kwarg, kwargs[kwarg])
        _, self.transforms = transforms.get_common_cbc_transforms(
                                       self.required_parameters, variable_args)
Example #3
    def __init__(self, variable_args, constraint_arg, **kwargs):
        self.constraint_arg = constraint_arg

        # set any given attributes and get transforms from variable_args
        # to required parameters
        for kwarg in kwargs.keys():
            setattr(self, kwarg, kwargs[kwarg])
        _, self.transforms = transforms.get_common_cbc_transforms(
            self.required_parameters, variable_args)
Example #4
def __init__(self, generator, variable_args=(), **frozen_params):
    super(BaseCBCGenerator, self).__init__(generator,
        variable_args=variable_args, **frozen_params)
    # decorate the generator function with a list of functions that convert
    # parameters to those used by the waveform generation interface
    all_args = set(
        list(self.frozen_params.keys()) + list(self.variable_args))
    # compare the set of all generator args to the input parameters of the
    # conversion functions and add any needed conversions to the list of
    # pregenerate functions
    params_used, cs = transforms.get_common_cbc_transforms(
                                    list(self.possible_args), variable_args)
    for c in cs:
        self._add_pregenerate(c)
    # check that there are no unused parameters
    unused_args = all_args.difference(params_used) \
                          .difference(self.possible_args)
    if len(unused_args):
        raise ValueError("The following args are not being used: "
                         "{opts}".format(opts=unused_args))
Example #5
def injections_from_cli(opts):
    """Gets injection parameters from the inference file(s).

    Parameters
    ----------
    opts : ArgumentParser options
        The options parsed from the command line.

    Returns
    -------
    FieldArray
        Array of the injection parameters from all of the input files given
        by ``opts.input_file``.
    """
    input_files = opts.input_file
    if isinstance(input_files, str):
        input_files = [input_files]
    parameters, _ = parse_parameters_opt(opts.parameters)
    if parameters is None:
        with InferenceFile(input_files[0], 'r') as fp:
            parameters = fp.variable_params
    injections = None
    # loop over all input files, reading the injections from each
    for input_file in input_files:
        # read injections from HDF input file as FieldArray
        these_injs = inject.InjectionSet(
            input_file,
            hdf_group=opts.injection_hdf_group,
        ).table.view(FieldArray)
        if injections is None:
            injections = these_injs
        else:
            injections = injections.append(these_injs)
    # check if extra parameters are needed beyond those in the injection file
    _, ts = transforms.get_common_cbc_transforms(parameters,
                                                 injections.fieldnames)
    # add parameters not included in injection file
    injections = transforms.apply_transforms(injections, ts)
    return injections
Example #6
def __init__(self, generator, variable_args=(), **frozen_params):
    super(BaseCBCGenerator, self).__init__(generator,
        variable_args=variable_args, **frozen_params)
    # decorate the generator function with a list of functions that convert
    # parameters to those used by the waveform generation interface
    all_args = set(
        list(self.frozen_params.keys()) + list(self.variable_args))
    # compare the set of all generator args to the input parameters of the
    # conversion functions and add any needed conversions to the list of
    # pregenerate functions
    params_used, cs = transforms.get_common_cbc_transforms(
                                    list(self.possible_args), variable_args)
    for c in cs:
        self._add_pregenerate(c)
    # check that there are no unused (non-calibration) parameters
    calib_args = set([a for a in self.variable_args if
                      a.startswith('calib_')])
    all_args = all_args - calib_args
    unused_args = all_args.difference(params_used) \
                          .difference(self.possible_args)
    if len(unused_args):
        logging.warning("WARNING: The following args are not being used "
                        "for waveform generation: %s",
                        ', '.join(unused_args))
Example #7
def injections_from_cli(opts):
    """Gets injection parameters from the inference file(s).

    Parameters
    ----------
    opts : ArgumentParser options
        The options parsed from the command line.

    Returns
    -------
    FieldArray
        Array of the injection parameters from all of the input files given
        by ``opts.input_file``.
    """
    input_files = opts.input_file
    if isinstance(input_files, str):
        input_files = [input_files]
    parameters, _ = parse_parameters_opt(opts.parameters)
    if parameters is None:
        with InferenceFile(input_files[0], 'r') as fp:
            parameters = fp.variable_args
    injections = None
    # loop over all input files, reading the injections from each
    for input_file in input_files:
        # read injections from HDF input file as FieldArray
        these_injs = inject.InjectionSet(input_file,
            hdf_group=opts.injection_hdf_group).table.view(FieldArray)
        if injections is None:
            injections = these_injs
        else:
            injections = injections.append(these_injs)
    # check if extra parameters are needed beyond those in the injection file
    _, ts = transforms.get_common_cbc_transforms(parameters,
                                                 injections.fieldnames)
    # add parameters not included in injection file
    injections = transforms.apply_transforms(injections, ts)
    return injections
Example #8
def results_from_cli(opts, load_samples=True, **kwargs):
    """
    Loads an inference result file along with any labels associated with it
    from the command line options.

    Parameters
    ----------
    opts : ArgumentParser options
        The options from the command line.
    load_samples : {True, bool}
        Load samples from the results file using the parameters, thin_start,
        and thin_interval specified in the options. The samples are returned
        as a FieldArray instance.

    \**kwargs :
        All other keyword arguments are passed to the InferenceFile's
        read_samples function.

    Returns
    -------
    fp_all : pycbc.io.InferenceFile
        The result file as an InferenceFile. If more than one input file,
        then it returns a list.
    parameters_all : list
        List of the parameters to use, parsed from the parameters option.
        If more than one input file, then it returns a list.
    labels_all : list
        List of labels to associate with the parameters. If more than one
        input file, then it returns a list.
    samples_all : {None, FieldArray}
        If load_samples, the samples as a FieldArray; otherwise, None.
        If more than one input file, then it returns a list.
    """

    # lists for files and samples from all input files
    fp_all = []
    parameters_all = []
    labels_all = []
    samples_all = []

    input_files = opts.input_file
    if isinstance(input_files, str):
        input_files = [input_files]

    # loop over all input files
    for input_file in input_files:
        logging.info("Reading input file %s", input_file)

        # read input file
        fp = InferenceFile(input_file, "r")

        # get parameters and a dict of labels for each parameter
        parameters = fp.variable_args if opts.parameters is None \
                         else opts.parameters
        parameters, ldict = parse_parameters_opt(parameters)

        # convert labels dict to list
        labels = []
        for p in parameters:
            try:
                label = ldict[p]
            except KeyError:
                label = fp.read_label(p)
            labels.append(label)

        # load the samples
        if load_samples:
            logging.info("Loading samples")

            # check if extra parameters are needed for non-sampling parameters
            file_parameters, ts = transforms.get_common_cbc_transforms(
                                                 parameters, fp.variable_args)

            # read samples from file
            samples = fp.read_samples(
                file_parameters, thin_start=opts.thin_start,
                thin_interval=opts.thin_interval, thin_end=opts.thin_end,
                iteration=opts.iteration,
                samples_group=opts.parameters_group, **kwargs)

            # add parameters not included in file
            samples = transforms.apply_transforms(samples, ts)

        # else do not read samples
        else:
            samples = None

        # add results to lists from all input files
        if len(input_files) > 1:
            fp_all.append(fp)
            parameters_all.append(parameters)
            labels_all.append(labels)
            samples_all.append(samples)

        # otherwise, with only one input file, do not return lists
        else:
            fp_all = fp
            parameters_all = parameters
            labels_all = labels
            samples_all = samples

    return fp_all, parameters_all, labels_all, samples_all
Example #9
File: __init__.py Project: vahi/pycbc
def results_from_cli(opts, load_samples=True, **kwargs):
    """Loads an inference result file along with any labels associated with it
    from the command line options.

    Parameters
    ----------
    opts : ArgumentParser options
        The options from the command line.
    load_samples : bool, optional
        Load the samples from the file.
    \**kwargs :
        Any other keyword arguments are passed to read the samples via
        ``samples_from_cli``.

    Returns
    -------
    fp_all : (list of) BaseInferenceFile type
        The result file as an hdf file. If more than one input file,
        then it returns a list.
    parameters : list of str
        List of the parameters to use, parsed from the parameters option.
    labels : dict
        Dictionary of labels to associate with the parameters.
    samples_all : (list of) FieldArray(s) or None
        If load_samples, the samples as a FieldArray; otherwise, None.
        If more than one input file, then it returns a list.
    """

    # lists for files and samples from all input files
    fp_all = []
    samples_all = []

    input_files = opts.input_file
    if isinstance(input_files, str):
        input_files = [input_files]

    # loop over all input files
    for input_file in input_files:
        logging.info("Reading input file %s", input_file)

        # read input file
        fp = loadfile(input_file, "r")

        # load the samples
        if load_samples:
            logging.info("Loading samples")

            # check if extra parameters are needed for non-sampling parameters
            file_parameters, ts = _transforms.get_common_cbc_transforms(
                opts.parameters, fp.variable_params)

            # read samples from file
            samples = fp.samples_from_cli(opts, parameters=file_parameters,
                                          **kwargs)

            logging.info("Using {} samples".format(samples.size))

            # add parameters not included in file
            samples = _transforms.apply_transforms(samples, ts)

        # else do not read samples
        else:
            samples = None

        # add results to lists from all input files
        if len(input_files) > 1:
            fp_all.append(fp)
            samples_all.append(samples)

        # otherwise, with only one input file, do not return lists
        else:
            fp_all = fp
            samples_all = samples

    return fp_all, opts.parameters, opts.parameters_labels, samples_all
Example #10
def results_from_cli(opts, load_samples=True, **kwargs):
    """
    Loads an inference result file along with any labels associated with it
    from the command line options.

    Parameters
    ----------
    opts : ArgumentParser options
        The options from the command line.
    load_samples : {True, bool}
        Load samples from the results file using the parameters, thin_start,
        and thin_interval specified in the options. The samples are returned
        as a FieldArray instance.

    \**kwargs :
        All other keyword arguments are passed to the InferenceFile's
        read_samples function.

    Returns
    -------
    result_file : pycbc.io.InferenceFile
        The result file as an InferenceFile.
    parameters : list
        List of the parameters to use, parsed from the parameters option.
    labels : list
        List of labels to associate with the parameters.
    samples : {None, FieldArray}
        If load_samples, the samples as a FieldArray; otherwise, None.
    """

    logging.info("Reading input file")
    fp = InferenceFile(opts.input_file, "r")
    parameters = fp.variable_args if opts.parameters is None \
                 else opts.parameters

    # load the labels
    parameters, ldict = parse_parameters_opt(parameters)
    # convert labels dict to list
    labels = []
    for p in parameters:
        try:
            label = ldict[p]
        except KeyError:
            label = fp.read_label(p)
        labels.append(label)

    # load the samples
    if load_samples:
        logging.info("Loading samples")
        # check if extra parameters are needed for non-sampling parameters
        file_parameters, ts = transforms.get_common_cbc_transforms(
                                                 parameters, fp.variable_args)
        # read samples from file
        samples = fp.read_samples(file_parameters,
            thin_start=opts.thin_start, thin_interval=opts.thin_interval,
            thin_end=opts.thin_end, iteration=opts.iteration,
            samples_group=opts.parameters_group,
            **kwargs)
        # add parameters not included in file
        samples = transforms.apply_transforms(samples, ts)
    else:
        samples = None

    return fp, parameters, labels, samples
Example #11
def results_from_cli(opts, load_samples=True, **kwargs):
    """
    Loads an inference result file along with any labels associated with it
    from the command line options.

    Parameters
    ----------
    opts : ArgumentParser options
        The options from the command line.
    load_samples : {True, bool}
        Load samples from the results file using the parameters, thin_start,
        and thin_interval specified in the options. The samples are returned
        as a FieldArray instance.

    \**kwargs :
        All other keyword arguments are passed to the InferenceFile's
        read_samples function.

    Returns
    -------
    fp_all : pycbc.io.InferenceFile
        The result file as an InferenceFile. If more than one input file,
        then it returns a list.
    parameters_all : list
        List of the parameters to use, parsed from the parameters option.
        If more than one input file, then it returns a list.
    labels_all : list
        List of labels to associate with the parameters. If more than one
        input file, then it returns a list.
    samples_all : {None, FieldArray}
        If load_samples, the samples as a FieldArray; otherwise, None.
        If more than one input file, then it returns a list.
    """

    # lists for files and samples from all input files
    fp_all = []
    parameters_all = []
    labels_all = []
    samples_all = []

    # loop over all input files
    for input_file in opts.input_file:
        logging.info("Reading input file %s", input_file)

        # read input file
        fp = InferenceFile(input_file, "r")

        # get parameters and a dict of labels for each parameter
        parameters = fp.variable_args if opts.parameters is None \
                         else opts.parameters
        parameters, ldict = parse_parameters_opt(parameters)

        # convert labels dict to list
        labels = []
        for p in parameters:
            try:
                label = ldict[p]
            except KeyError:
                label = fp.read_label(p)
            labels.append(label)

        # load the samples
        if load_samples:
            logging.info("Loading samples")

            # check if extra parameters are needed for non-sampling parameters
            file_parameters, ts = transforms.get_common_cbc_transforms(
                                                 parameters, fp.variable_args)

            # read samples from file
            samples = fp.read_samples(
                file_parameters, thin_start=opts.thin_start,
                thin_interval=opts.thin_interval, thin_end=opts.thin_end,
                iteration=opts.iteration,
                samples_group=opts.parameters_group, **kwargs)

            # add parameters not included in file
            samples = transforms.apply_transforms(samples, ts)

        # else do not read samples
        else:
            samples = None

        # add results to lists from all input files
        if len(opts.input_file) > 1:
            fp_all.append(fp)
            parameters_all.append(parameters)
            labels_all.append(labels)
            samples_all.append(samples)

        # otherwise, with only one input file, do not return lists
        else:
            fp_all = fp
            parameters_all = parameters
            labels_all = labels
            samples_all = samples

    return fp_all, parameters_all, labels_all, samples_all
Example #12
def results_from_cli(opts, load_samples=True, walkers=None):
    """
    Loads an inference result file along with any labels associated with it
    from the command line options.

    Parameters
    ----------
    opts : ArgumentParser options
        The options from the command line.
    load_samples : {True, bool}
        Load samples from the results file using the parameters, thin_start,
        and thin_interval specified in the options. The samples are returned
        as a FieldArray instance.
    walkers : {None, (list of) int}
        If loading samples, the walkers to load from. If None, will load from
        all walkers.

    Returns
    -------
    result_file : pycbc.io.InferenceFile
        The result file as an InferenceFile.
    parameters : list
        List of the parameters to use, parsed from the parameters option.
    labels : list
        List of labels to associate with the parameters.
    samples : {None, FieldArray}
        If load_samples, the samples as a FieldArray; otherwise, None.
    """

    logging.info("Reading input file")
    fp = InferenceFile(opts.input_file, "r")
    parameters = fp.variable_args if opts.parameters is None \
                 else opts.parameters

    # load the labels
    labels = []
    for ii, p in enumerate(parameters):
        if len(p.split(':')) == 2:
            p, label = p.split(':')
            parameters[ii] = p
        else:
            label = fp.read_label(p)
        labels.append(label)

    # load the samples
    if load_samples:
        logging.info("Loading samples")
        # check if extra parameters are needed for non-sampling parameters
        file_parameters, ts = transforms.get_common_cbc_transforms(
            parameters, fp.variable_args)
        # read samples from file
        samples = fp.read_samples(file_parameters,
                                  walkers=walkers,
                                  thin_start=opts.thin_start,
                                  thin_interval=opts.thin_interval,
                                  thin_end=opts.thin_end,
                                  iteration=opts.iteration,
                                  samples_group=opts.parameters_group)
        # add parameters not included in file
        samples = transforms.apply_transforms(samples, ts)
    else:
        samples = None

    return fp, parameters, labels, samples
Example #13
def results_from_cli(opts, load_samples=True, walkers=None):
    """
    Loads an inference result file along with any labels associated with it
    from the command line options.

    Parameters
    ----------
    opts : ArgumentParser options
        The options from the command line.
    load_samples : {True, bool}
        Load samples from the results file using the parameters, thin_start,
        and thin_interval specified in the options. The samples are returned
        as a FieldArray instance.
    walkers : {None, (list of) int}
        If loading samples, the walkers to load from. If None, will load from
        all walkers.

    Returns
    -------
    result_file : pycbc.io.InferenceFile
        The result file as an InferenceFile.
    parameters : list
        List of the parameters to use, parsed from the parameters option.
    labels : list
        List of labels to associate with the parameters.
    samples : {None, FieldArray}
        If load_samples, the samples as a FieldArray; otherwise, None.
    """

    logging.info("Reading input file")
    fp = InferenceFile(opts.input_file, "r")
    parameters = fp.variable_args if opts.parameters is None \
                 else opts.parameters

    # load the labels
    labels = []
    for ii, p in enumerate(parameters):
        if len(p.split(':')) == 2:
            p, label = p.split(':')
            parameters[ii] = p
        else:
            label = fp.read_label(p)
        labels.append(label)

    # load the samples
    if load_samples:
        logging.info("Loading samples")
        # check if extra parameters are needed for non-sampling parameters
        file_parameters, ts = transforms.get_common_cbc_transforms(
                                                 parameters, fp.variable_args)
        # read samples from file
        samples = fp.read_samples(
            file_parameters, walkers=walkers,
            thin_start=opts.thin_start, thin_interval=opts.thin_interval,
            thin_end=opts.thin_end, iteration=opts.iteration,
            samples_group=opts.parameters_group)
        # add parameters not included in file
        samples = transforms.apply_transforms(samples, ts)
    else:
        samples = None

    return fp, parameters, labels, samples
Example #14
File: __init__.py Project: lbarosi/pycbc
def results_from_cli(opts, load_samples=True, **kwargs):
    """Loads an inference result file along with any labels associated with it
    from the command line options.

    Parameters
    ----------
    opts : ArgumentParser options
        The options from the command line.
    load_samples : bool, optional
        Load the samples from the file.
    \**kwargs :
        Any other keyword arguments are passed to read the samples via
        ``samples_from_cli``.

    Returns
    -------
    fp_all : (list of) BaseInferenceFile type
        The result file as an hdf file. If more than one input file,
        then it returns a list.
    parameters : list of str
        List of the parameters to use, parsed from the parameters option.
    labels : dict
        Dictionary of labels to associate with the parameters.
    samples_all : (list of) FieldArray(s) or None
        If load_samples, the samples as a FieldArray; otherwise, None.
        If more than one input file, then it returns a list.
    """

    # lists for files and samples from all input files
    fp_all = []
    samples_all = []

    input_files = opts.input_file
    if isinstance(input_files, str):
        input_files = [input_files]

    # load constraints
    constraints = {}
    if opts.constraint is not None:
        for constraint in opts.constraint:
            if len(constraint.split(':')) == 2:
                constraint, fn = constraint.split(':')
                constraints[fn] = constraint
            # no file provided, make sure there's only one constraint
            elif len(opts.constraint) > 1:
                raise ValueError("must provide a file to apply constraints "
                                 "to if providing more than one constraint")
            else:
                # this means no file, only one constraint, apply to all
                # files
                constraints = {fn: constraint for fn in input_files}

    # loop over all input files
    for input_file in input_files:
        logging.info("Reading input file %s", input_file)

        # read input file
        fp = loadfile(input_file, "r")

        # load the samples
        if load_samples:
            logging.info("Loading samples")

            # check if extra parameters are needed for non-sampling parameters
            file_parameters, ts = _transforms.get_common_cbc_transforms(
                opts.parameters, fp.variable_params)

            # read samples from file
            samples = fp.samples_from_cli(opts,
                                          parameters=file_parameters,
                                          **kwargs)

            logging.info("Loaded {} samples".format(samples.size))

            # add parameters not included in file
            samples = _transforms.apply_transforms(samples, ts)

            if input_file in constraints:
                logging.info("Applying constraints")
                mask = samples[constraints[input_file]]
                samples = samples[mask]
                if samples.size == 0:
                    raise ValueError("No samples remain after constraint {} "
                                     "applied".format(constraints[input_file]))
                logging.info("{} samples remain".format(samples.size))

        # else do not read samples
        else:
            samples = None

        # add results to lists from all input files
        if len(input_files) > 1:
            fp_all.append(fp)
            samples_all.append(samples)

        # otherwise, with only one input file, do not return lists
        else:
            fp_all = fp
            samples_all = samples

    return fp_all, opts.parameters, opts.parameters_labels, samples_all
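
Taken together, the examples above share one pattern: pass the list of requested parameters and the list of parameters actually available to transforms.get_common_cbc_transforms, which returns the subset of parameters to read plus the standard CBC transforms needed to derive the rest, and then run those transforms with transforms.apply_transforms. The sketch below is a minimal, self-contained illustration of that pattern, assuming pycbc is installed; the parameter names and hard-coded mass values are invented for demonstration, and using a plain dict of arrays in place of the FieldArray seen above is an assumption made for brevity.

import numpy
from pycbc import transforms

# parameters we want versus parameters actually available (illustrative names)
requested = ['mchirp', 'q']
stored = ['mass1', 'mass2']

# ask which stored parameters are needed and which common CBC transforms can
# derive the remaining requested parameters from them
file_params, ts = transforms.get_common_cbc_transforms(requested, stored)

# stand-in for samples read from an inference or injection file
samples = {'mass1': numpy.array([30.0, 25.0, 20.0]),
           'mass2': numpy.array([25.0, 20.0, 15.0])}

# add the derived parameters (chirp mass and mass ratio) that were not stored
samples = transforms.apply_transforms(samples, ts)
print(samples['mchirp'], samples['q'])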