Example #1
    def samples(self):
        """Returns the samples in the chain as a FieldArray.

        If the sampling args are not the same as the variable args, the
        returned samples will have both the sampling and the variable args.

        The returned FieldArray has dimension [additional dimensions x]
        nwalkers x niterations.
        """
        # chain is a [additional dimensions x] niterations x ndim array
        samples = self.chain
        sampling_args = self.sampling_args
        # convert to dictionary to apply boundary conditions
        samples = {
            param: samples[..., ii]
            for ii, param in enumerate(sampling_args)
        }
        samples = self.likelihood_evaluator._prior.apply_boundary_conditions(
            **samples)
        # now convert to field array
        samples = FieldArray.from_arrays(
            [samples[param] for param in sampling_args], names=sampling_args)
        # apply transforms to go to variable args space
        return self.likelihood_evaluator.apply_sampling_transforms(
            samples, inverse=True)
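For reference, here is a minimal, self-contained sketch of the FieldArray pattern these examples rely on (assuming FieldArray is importable from pycbc.io; the array sizes are illustrative):

import numpy
from pycbc.io import FieldArray

# two plain arrays become named fields of a single record array
x = numpy.random.normal(size=10)
y = numpy.random.normal(size=10)
arr = FieldArray.from_arrays([x, y], names=['x', 'y'])
print(arr.fieldnames)   # ('x', 'y')
print(arr['x'].shape)   # (10,)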
Example #2
    def prior_rvs(self, size=1, prior=None):
        """Returns random variates drawn from the prior.

        If the ``sampling_params`` are different from the ``variable_params``,
        the variates are transformed to the `sampling_params` parameter space
        before being returned.

        Parameters
        ----------
        size : int, optional
            Number of random values to return for each parameter. Default is 1.
        prior : JointDistribution, optional
            Use the given prior to draw values rather than the saved prior.

        Returns
        -------
        FieldArray
            A field array of the random values.
        """
        # draw values from the prior
        if prior is None:
            prior = self.prior_distribution
        p0 = prior.rvs(size=size)
        # transform if necessary
        if self.sampling_transforms is not None:
            ptrans = self.sampling_transforms.apply(p0)
            # pull out the sampling args
            p0 = FieldArray.from_arrays(
                [ptrans[arg] for arg in self.sampling_params],
                names=self.sampling_params)
        return p0
Example #3
    def prior_rvs(self, size=1, prior=None):
        """Returns random variates drawn from the prior.

        If the ``sampling_args`` are different from the ``variable_args``, the
        variates are transformed to the `sampling_args` parameter space before
        being returned.

        Parameters
        ----------
        size : int, optional
            Number of random values to return for each parameter. Default is 1.
        prior : JointDistribution, optional
            Use the given prior to draw values rather than the saved prior.

        Returns
        -------
        FieldArray
            A field array of the random values.
        """
        # draw values from the prior
        if prior is None:
            prior = self._prior
        p0 = prior.rvs(size=size)
        # transform if necessary
        if self._sampling_transforms is not None:
            ptrans = self.apply_sampling_transforms(p0)
            # pull out the sampling args
            p0 = FieldArray.from_arrays([ptrans[arg]
                                         for arg in self._sampling_args],
                                        names=self._sampling_args)
        return p0
Example #4
File: base.py Project: vivienr/gwin
    def samples(self):
        """Returns the samples in the chain as a FieldArray.

        If the sampling params are not the same as the model params, the
        returned samples will have both the sampling and the model params.

        The returned FieldArray has dimension [additional dimensions x]
        nwalkers x niterations.
        """
        # chain is a [additional dimensions x] niterations x ndim array
        samples = self.chain
        sampling_params = self.sampling_params
        # convert to dictionary to apply boundary conditions
        samples = {
            param: samples[..., ii]
            for ii, param in enumerate(sampling_params)
        }
        samples = self.model.prior_distribution.apply_boundary_conditions(
            **samples)
        # now convert to field array
        samples = FieldArray.from_arrays(
            [samples[param] for param in sampling_params],
            names=sampling_params)
        # apply transforms to go to model params space
        if self.model.sampling_transforms is not None:
            samples = self.model.sampling_transforms.apply(samples,
                                                           inverse=True)
        return samples
Example #5
 def likelihood_stats(self):
     """Returns the log likelihood ratio and log prior as a FieldArray.
     The returned array has shape ntemps x nwalkers x niterations.
     """
     # likelihood has shape ntemps x nwalkers x niterations
     logl = self._sampler.lnlikelihood
     # get prior from posterior
     logp = self._sampler.lnprobability - logl
     # compute the likelihood ratio
     loglr = logl - self.likelihood_evaluator.lognl
     return FieldArray.from_kwargs(loglr=loglr, prior=logp)
Example #6
 def likelihood_stats(self):
     """Returns the likelihood stats as a FieldArray, with field names
     corresponding to the type of data returned by the likelihood evaluator.
     The returned array has shape nwalkers x niterations. If no additional
     stats were returned to the sampler by the likelihood evaluator, returns
     None.
     """
     stats = numpy.array(self._sampler.blobs)
     if stats.size == 0:
         return None
      arrays = {field: stats[:, :, fi]
                for fi, field in
                enumerate(self.likelihood_evaluator.metadata_fields)}
     return FieldArray.from_kwargs(**arrays).transpose()
Example #7
    def __init__(self, filename, approximant=None, **kwds):
        ext = os.path.basename(filename)
        if 'xml' in ext:
            self.indoc = ligolw_utils.load_filename(
                filename, False, contenthandler=LIGOLWContentHandler)
            self.table = table.get_table(
                self.indoc, lsctables.SnglInspiralTable.tableName)
            self.table = FieldArray.from_ligolw_table(self.table)

            # inclination stored in xml alpha3 column
            names = list(self.table.dtype.names)
            names = tuple([n if n != 'alpha3' else 'inclination' for n in names]) 
            self.table.dtype.names = names    

        elif 'hdf' in ext:
            f = h5py.File(filename, 'r')
            dtype = []
            data = {}
            for key in f.keys():
                try:
                    data[str(key)] = f[key][:]
                    dtype.append((str(key), data[key].dtype))
                except Exception:
                    # skip entries that cannot be read as arrays
                    pass

            num = len(data[list(data)[0]])
            self.table = FieldArray(num, dtype=dtype)
            for key in data:
                self.table[key] = data[key]
        else:
            raise ValueError("Unsupported template bank file extension %s" % ext)

        if not hasattr(self.table, 'template_duration'):
            self.table = self.table.add_fields(numpy.zeros(len(self.table),
                                     dtype=numpy.float32), 'template_duration') 
        self.extra_args = kwds  
        self.approximant_str = approximant
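A hypothetical usage sketch for the class this __init__ belongs to (the filename and the presence of a mass1 column are assumptions):

bank = TemplateBank('H1L1-BANK.hdf', approximant='SEOBNRv4_opt')
print(len(bank.table))          # number of templates in the bank
print(bank.table['mass1'][:5])  # first few primary masses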
Example #8
    def copy_samples(self,
                     other,
                     parameters=None,
                     parameter_names=None,
                     read_args=None,
                     write_args=None):
        """Should copy samples to the other files.

        Parameters
        ----------
        other : InferenceFile
            An open inference file to write to.
        parameters : list of str, optional
            List of parameters to copy. If None, will copy all parameters.
        parameter_names : dict, optional
            Rename one or more parameters to the given name. The dictionary
            should map parameter -> parameter name. If None, will just use the
            original parameter names.
        read_args : dict, optional
            Arguments to pass to ``read_samples``.
        write_args : dict, optional
            Arguments to pass to ``write_samples``.
        """
        # select the samples to copy
        logging.info("Reading samples to copy")
        if parameters is None:
            parameters = self.variable_params
        # if list of desired parameters is different, rename
        if set(parameters) != set(self.variable_params):
            other.attrs['variable_params'] = parameters
        if read_args is None:
            read_args = {}
        samples = self.read_samples(parameters, **read_args)
        logging.info("Copying {} samples".format(samples.size))
        # if different parameter names are desired, get them from the samples
        if parameter_names:
            arrs = {pname: samples[p] for p, pname in parameter_names.items()}
            arrs.update({
                p: samples[p]
                for p in parameters if p not in parameter_names
            })
            samples = FieldArray.from_kwargs(**arrs)
            other.attrs['variable_params'] = samples.fieldnames
        logging.info("Writing samples")
        if write_args is None:
            write_args = {}
        other.write_samples({p: samples[p]
                             for p in samples.fieldnames}, **write_args)
Example #9
 def likelihood_stats(self):
     """Returns the likelihood stats as a FieldArray, with field names
     corresponding to the type of data returned by the likelihood evaluator.
     The returned array has shape nwalkers x niterations. If no additional
     stats were returned to the sampler by the likelihood evaluator, returns
     None.
     """
     stats = numpy.array(self._sampler.blobs)
     if stats.size == 0:
         return None
     # we'll force arrays to float; this way, if there are `None`s in the
     # blobs, they will be changed to `nan`s
     arrays = {field: stats[..., fi].astype(float)
               for fi, field in
               enumerate(self.likelihood_evaluator.metadata_fields)}
     return FieldArray.from_kwargs(**arrays).transpose()
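The cast to float matters because the blobs may contain None entries; numpy's object-to-float conversion maps those to nan, as a quick check shows:

import numpy
print(numpy.array([1.5, None]).astype(float))  # -> [1.5 nan]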
Example #10
    def compute_acls(cls, fp, start_index=None, end_index=None):
        """Computes the autocorrleation length for all variable args and
        temperatures in the given file.
        
        Parameter values are averaged over all walkers at each iteration and
        temperature. The ACL is then calculated over the averaged chain. If
        the returned ACL is `inf`, it will default to the number of current
        iterations.

        Parameters
        -----------
        fp : InferenceFile
            An open file handler to read the samples from.
        start_index : {None, int}
            The start index to compute the acl from. If None, will try to use
            the number of burn-in iterations in the file; otherwise, will start
            at the first sample.
        end_index : {None, int}
            The end index to compute the acl to. If None, will go to the end
            of the current iteration.

        Returns
        -------
        FieldArray
            An ntemps-long `FieldArray` containing the ACL for each temperature
            and for each variable argument, with the variable arguments as
            fields.
        """
        acls = {}
        if end_index is None:
            end_index = fp.niterations
        tidx = numpy.arange(fp.ntemps)
        for param in fp.variable_args:
            these_acls = numpy.zeros(fp.ntemps, dtype=int)
            for tk in tidx:
                samples = cls.read_samples(fp, param, thin_start=start_index,
                                           thin_interval=1, thin_end=end_index,
                                           temps=tk, flatten=False)[param]
                # contract the walker dimension using the mean, and flatten
                # the (length 1) temp dimension
                samples = samples.mean(axis=1)[0,:]
                acl = autocorrelation.calculate_acl(samples)
                if numpy.isinf(acl):
                    acl = samples.size
                these_acls[tk] = acl
            acls[param] = these_acls
        return FieldArray.from_kwargs(**acls)
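A possible call pattern, assuming fp is an open InferenceFile and SamplerClass is the sampler class this method is bound to:

acls = SamplerClass.compute_acls(fp)
# thin by the worst-case ACL over all parameters and temperatures
max_acl = max(int(acls[param].max()) for param in acls.fieldnames)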
Example #11
    def compute_acls(cls, fp, start_index=None, end_index=None):
        """Computes the autocorrleation length for all variable args for all
        walkers for all temps in the given file. If the returned acl is inf,
        will default to the number of requested iterations.

        Parameters
        -----------
        fp : InferenceFile
            An open file handler to read the samples from.
        start_index : {None, int}
            The start index to compute the acl from. If None, will try to use
            the number of burn-in iterations in the file; otherwise, will start
            at the first sample.
        end_index : {None, int}
            The end index to compute the acl to. If None, will go to the end
            of the current iteration.

        Returns
        -------
        FieldArray
            An ntemps x nwalkers `FieldArray` containing the acl for each
            walker and temp for each variable argument, with the variable
            arguments as fields.
        """
        acls = {}
        if end_index is None:
            end_index = fp.niterations
        tidx = numpy.arange(fp.ntemps)
        widx = numpy.arange(fp.nwalkers)
        for param in fp.variable_args:
            these_acls = numpy.zeros((fp.ntemps, fp.nwalkers), dtype=int)
            for tk in tidx:
                for wi in widx:
                    samples = cls.read_samples(fp,
                                               param,
                                               thin_start=start_index,
                                               thin_interval=1,
                                               thin_end=end_index,
                                               walkers=wi,
                                               temps=tk)[param]
                    acl = autocorrelation.calculate_acl(samples)
                    these_acls[tk, wi] = int(min(acl, samples.size))
            acls[param] = these_acls
        return FieldArray.from_kwargs(**acls)
Example #12
 def likelihood_stats(self):
     """Returns the log likelihood ratio and log prior as a FieldArray.
     The returned array has shape ntemps x nwalkers x niterations.
     """
     # likelihood has shape ntemps x nwalkers x niterations
     logl = self._sampler.lnlikelihood
     # get prior from posterior
     logp = self._sampler.lnprobability - logl
     # compute the likelihood ratio
     loglr = logl - self.likelihood_evaluator.lognl
     kwargs = {'loglr': loglr, 'prior': logp}
     # if different coordinates were used for sampling, get the jacobian
     if self.likelihood_evaluator.sampling_transforms is not None:
         samples = self.samples
         # convert to dict
         d = {param: samples[param] for param in samples.fieldnames}
         logj = self.likelihood_evaluator.logjacobian(**d)
         kwargs['logjacobian'] = logj
     return FieldArray.from_kwargs(**kwargs)
Example #13
    def _oldstyle_read_acls(fp):
        """Deprecated: reads acls from older style files.

        Parameters
        ----------
        fp : InferenceFile
            An open file handler to read the acls from.

        Returns
        -------
        FieldArray
            An ntemps-long ``FieldArray`` containing the acls for every
            temperature, with the variable arguments as fields.
        """
        group = fp.samples_group + '/{param}/temp{tk}'
        tidx = numpy.arange(fp.ntemps)
        arrays = {}
        for param in fp.variable_args:
            arrays[param] = numpy.array([
                fp[group.format(param=param, tk=tk)].attrs['acl']
                for tk in tidx])
        return FieldArray.from_kwargs(**arrays)
Example #14
    def read_acls(fp):
        """Reads the acls of all the walker chains saved in the given file.

        Parameters
        ----------
        fp : InferenceFile
            An open file handler to read the acls from.

        Returns
        -------
        FieldArray
            An ntemps-long ``FieldArray`` containing the acls for every
            temperature, with the variable arguments as fields.
        """
        group = fp.samples_group + '/{param}/temp{tk}'
        tidx = numpy.arange(fp.ntemps)
        arrays = {}
        for param in fp.variable_args:
            arrays[param] = numpy.array([
                fp[group.format(param=param, tk=tk)].attrs['acl']
                for tk in tidx])
        return FieldArray.from_kwargs(**arrays)
Example #15
    def read_acls(fp):
        """Reads the acls of all the walker chains saved in the given file.

        Parameters
        ----------
        fp : InferenceFile
            An open file handler to read the acls from.

        Returns
        -------
        FieldArray
            An nwalkers-long `FieldArray` containing the acl for each walker
            and each variable argument, with the variable arguments as fields.
        """
        group = fp.samples_group + '/{param}/walker{wi}'
        widx = numpy.arange(fp.nwalkers)
        arrays = {}
        for param in fp.variable_args:
            arrays[param] = numpy.array([
                fp[group.format(param=param, wi=wi)].attrs['acl']
                for wi in widx])
        return FieldArray.from_kwargs(**arrays)
Example #16
def create_density_plot(xparam,
                        yparam,
                        samples,
                        plot_density=True,
                        plot_contours=True,
                        percentiles=None,
                        cmap='viridis',
                        contour_color=None,
                        xmin=None,
                        xmax=None,
                        ymin=None,
                        ymax=None,
                        exclude_region=None,
                        fig=None,
                        ax=None,
                        use_kombine=False):
    """Computes and plots posterior density and confidence intervals using the
    given samples.

    Parameters
    ----------
    xparam : string
        The parameter to plot on the x-axis.
    yparam : string
        The parameter to plot on the y-axis.
    samples : dict, numpy structured array, or FieldArray
        The samples to plot.
    plot_density : {True, bool}
        Plot a color map of the density.
    plot_contours : {True, bool}
        Plot contours showing the n-th percentiles of the density.
    percentiles : {None, float or array}
        What percentile contours to draw. If None, will plot the 50th
        and 90th percentiles.
    cmap : {'viridis', string}
        The name of the colormap to use for the density plot.
    contour_color : {None, string}
        What color to make the contours. Default is white for density
        plots and black for other plots.
    xmin : {None, float}
        Minimum value to plot on x-axis.
    xmax : {None, float}
        Maximum value to plot on x-axis.
    ymin : {None, float}
        Minimum value to plot on y-axis.
    ymax : {None, float}
        Maximum value to plot on y-axis.
    exclude_region : {None, str}
        Exclude the specified region when plotting the density or contours.
        Must be a string in terms of `xparam` and `yparam` that is
        understandable by numpy's logical evaluation. For example, if
        `xparam = m_1` and `yparam = m_2`, and you want to exclude the region
        for which `m_2` is greater than `m_1`, then exclude region should be
        `'m_2 > m_1'`.
    fig : {None, pyplot.figure}
        Add the plot to the given figure. If None and ax is None, will create
        a new figure.
    ax : {None, pyplot.axes}
        Draw plot on the given axis. If None, will create a new axis from
        `fig`.
    use_kombine : {False, bool}
        Use kombine's KDE to calculate density. Otherwise, will use
        `scipy.stats.gaussian_kde`. Default is False.

    Returns
    -------
    fig : pyplot.figure
        The figure the plot was made on.
    ax : pyplot.axes
        The axes the plot was drawn on.
    """
    if percentiles is None:
        percentiles = numpy.array([50., 90.])
    percentiles = 100. - numpy.array(percentiles)
    percentiles.sort()

    if ax is None and fig is None:
        fig = pyplot.figure()
    if ax is None:
        ax = fig.add_subplot(111)

    # convert samples to array and construct kde
    xsamples = samples[xparam]
    ysamples = samples[yparam]
    arr = numpy.vstack((xsamples, ysamples)).T
    kde = construct_kde(arr, use_kombine=use_kombine)

    # construct grid to evaluate on
    if xmin is None:
        xmin = xsamples.min()
    if xmax is None:
        xmax = xsamples.max()
    if ymin is None:
        ymin = ysamples.min()
    if ymax is None:
        ymax = ysamples.max()
    npts = 100
    X, Y = numpy.mgrid[xmin:xmax:complex(0, npts),  # pylint:disable=invalid-slice-index
                       ymin:ymax:complex(0, npts)]  # pylint:disable=invalid-slice-index
    pos = numpy.vstack([X.ravel(), Y.ravel()])
    if use_kombine:
        Z = numpy.exp(kde(pos.T).reshape(X.shape))
        draw = kde.draw
    else:
        Z = kde(pos).T.reshape(X.shape)
        draw = kde.resample

    if exclude_region is not None:
        # convert X,Y to a single FieldArray so we can use its ability to
        # evaluate strings
        farr = FieldArray.from_kwargs(**{xparam: X, yparam: Y})
        Z[farr[exclude_region]] = 0.

    if plot_density:
        ax.imshow(numpy.rot90(Z),
                  extent=[xmin, xmax, ymin, ymax],
                  aspect='auto',
                  cmap=cmap,
                  zorder=1)
        if contour_color is None:
            contour_color = 'w'

    if plot_contours:
        # compute the percentile values
        resamps = kde(draw(int(npts**2)))
        if use_kombine:
            resamps = numpy.exp(resamps)
        s = numpy.percentile(resamps, percentiles)
        if contour_color is None:
            contour_color = 'k'
        # make linewidths thicker if not plotting density for clarity
        if plot_density:
            lw = 1
        else:
            lw = 2
        ct = ax.contour(X,
                        Y,
                        Z,
                        s,
                        colors=contour_color,
                        linewidths=lw,
                        zorder=3)
        # label contours
        lbls = ['{p}%'.format(p=int(p)) for p in (100. - percentiles)]
        fmt = dict(zip(ct.levels, lbls))
        fs = 12
        ax.clabel(ct, ct.levels, inline=True, fmt=fmt, fontsize=fs)

    return fig, ax
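A quick smoke test of create_density_plot using correlated Gaussian samples (parameter names are arbitrary; per the docstring, a plain dict of arrays is accepted):

import numpy
samples = {'x': numpy.random.normal(size=5000)}
samples['y'] = samples['x'] + numpy.random.normal(scale=0.5, size=5000)
fig, ax = create_density_plot('x', 'y', samples, percentiles=[50, 90])
fig.savefig('density.png')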
Example #17
    def copy(self, other, parameters=None, parameter_names=None,
             posterior_only=False, **kwargs):
        """Copies data in this file to another file.

        The samples and stats to copy may be down selected using the given
        kwargs. All other data (the "metadata") are copied exactly.

        Parameters
        ----------
        other : str or InferenceFile
            The file to write to. May be either a string giving a filename,
            or an open hdf file. If the former, the file will be opened with
            the write attribute (note that if a file already exists with that
            name, it will be deleted).
        parameters : list of str, optional
            List of parameters to copy. If None, will copy all parameters.
        parameter_names : dict, optional
            Rename one or more parameters to the given name. The dictionary
            should map parameter -> parameter name. If None, will just use the
            original parameter names.
        posterior_only : bool, optional
            Write the samples and likelihood stats as flattened arrays, and
            set other's posterior_only attribute. For example, if this file
            has a parameter's samples written to
            `{samples_group}/{param}/walker{x}`, then other will have all of
            the selected samples from all walkers written to
            `{samples_group}/{param}/`.
        \**kwargs :
            All other keyword arguments are passed to `read_samples`.

        Returns
        -------
        InferenceFile
            The open file handler to other.
        """
        if not isinstance(other, h5py.File):
            # check that we're not trying to overwrite this file
            if other == self.name:
                raise IOError("destination is the same as this file")
            other = InferenceFile(other, 'w')
        # copy metadata over
        self.copy_metadata(other)
        # update other's posterior attribute
        if posterior_only:
            other.attrs['posterior_only'] = posterior_only
        # select the samples to copy
        logging.info("Reading samples to copy")
        if parameters is None:
            parameters = self.variable_args
        # if list of desired parameters is different, rename variable args
        if set(parameters) != set(self.variable_args):
            other.attrs['variable_args'] = parameters
        # if only the posterior is desired, we'll flatten the results
        if not posterior_only and not self.posterior_only:
            kwargs['flatten'] = False
        samples = self.read_samples(parameters, **kwargs)
        logging.info("Copying {} samples".format(samples.size))
        # if different parameter names are desired, get them from the samples
        if parameter_names:
            arrs = {pname: samples[p] for p, pname in parameter_names.items()}
            arrs.update({p: samples[p] for p in parameters
                         if p not in parameter_names})
            samples = FieldArray.from_kwargs(**arrs)
            other.attrs['variable_args'] = samples.fieldnames
        logging.info("Writing samples")
        other.samples_parser.write_samples_group(other, self.samples_group,
                                                 samples.fieldnames, samples)
        # do the same for the likelihood stats
        logging.info("Reading stats to copy")
        stats = self.read_likelihood_stats(**kwargs)
        logging.info("Writing stats")
        other.samples_parser.write_samples_group(other, self.stats_group,
                                                 stats.fieldnames, stats)
        # if any down selection was done, re-set the burn in iterations and
        # the acl, and the niterations.
        # The last dimension of the samples returned by the sampler should
        # be the number of iterations.
        if samples.shape[-1] != self.niterations:
            other.attrs['acl'] = 1
            other.attrs['burn_in_iterations'] = 0
            other.attrs['niterations'] = samples.shape[-1]
        return other
Example #18
File: base.py Project: vivienr/gwin
    def compute_acfs(cls,
                     fp,
                     start_index=None,
                     end_index=None,
                     per_walker=False,
                     walkers=None,
                     parameters=None):
        """Computes the autocorrleation function of the model params in the
        given file.

        By default, parameter values are averaged over all walkers at each
        iteration. The ACF is then calculated over the averaged chain. An
        ACF per-walker will be returned instead if ``per_walker=True``.

        Parameters
        -----------
        fp : InferenceFile
            An open file handler to read the samples from.
        start_index : {None, int}
            The start index to compute the acl from. If None, will try to use
            the number of burn-in iterations in the file; otherwise, will start
            at the first sample.
        end_index : {None, int}
            The end index to compute the acl to. If None, will go to the end
            of the current iteration.
        per_walker : optional, bool
            Return the ACF for each walker separately. Default is False.
        walkers : optional, int or array
            Calculate the ACF using only the given walkers. If None (the
            default) all walkers will be used.
        parameters : optional, str or array
            Calculate the ACF for only the given parameters. If None (the
            default) will calculate the ACF for all of the model params.

        Returns
        -------
        FieldArray
            A ``FieldArray`` of the ACF vs iteration for each parameter. If
            ``per_walker`` is True, the FieldArray will have shape
            ``nwalkers x niterations``.
        """
        acfs = {}
        if parameters is None:
            parameters = fp.variable_params
        if isinstance(parameters, str):
            parameters = [parameters]
        for param in parameters:
            if per_walker:
                # just call myself with a single walker
                if walkers is None:
                    walkers = numpy.arange(fp.nwalkers)
                arrays = [
                    cls.compute_acfs(fp,
                                     start_index=start_index,
                                     end_index=end_index,
                                     per_walker=False,
                                     walkers=ii,
                                     parameters=param)[param] for ii in walkers
                ]
                acfs[param] = numpy.vstack(arrays)
            else:
                samples = cls.read_samples(fp,
                                           param,
                                           thin_start=start_index,
                                           thin_interval=1,
                                           thin_end=end_index,
                                           walkers=walkers,
                                           flatten=False)[param]
                samples = samples.mean(axis=0)
                acfs[param] = autocorrelation.calculate_acf(samples).numpy()
        return FieldArray.from_kwargs(**acfs)
Example #19

dtype = [('mass1', float), ('mass2', float),
         ('spin1z', float), ('spin2z', float),
         ('tc', float), ('distance', float),
         ('ra', float), ('dec', float),
         ('approximant', 'S32')]

static_params = {'f_lower': 17.,
                 'f_ref': 17.,
                 'taper': 'start',
                 'inclination': 0.,
                 'coa_phase': 0.,
                 'polarization': 0.}

samples = FieldArray(2, dtype=dtype)

# masses and spins are intended to match the highest
# and lowest mass templates in the template bank
samples['mass1'] = [290.929321, 1.1331687]
samples['mass2'] = [3.6755455, 1.010624]
samples['spin1z'] = [0.9934847, 0.029544285]
samples['spin2z'] = [0.92713535, 0.020993788]

# distance and sky locations to have network SNRs ~15
samples['tc'] = [1272790100.1, 1272790260.1]
samples['distance'] = [178., 79.]
samples['ra'] = [np.deg2rad(45), np.deg2rad(10)]
samples['dec'] = [np.deg2rad(45), np.deg2rad(-45)]

samples['approximant'] = ['SEOBNRv4_opt', 'SpinTaylorT4']
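Besides plain field access, a FieldArray can evaluate string expressions over its fields, which is the same mechanism create_density_plot uses for exclude_region in Example #16; a sketch with the array built above:

mtotal = samples['mass1 + mass2']            # expression evaluated per row
heavier = samples[samples['mass1 > mass2']]  # boolean-mask selection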
Example #20
    def __init__(
        self,
        tcs,
        freqs,
        amps,
        taus,
        ras,
        decs,
        psis=0.0,
        phis=0.0,
        inclinations=np.pi / 2.0,
        detector=None,
        starttime=1000000000,
        duration=1.0,
        deltat=1 / 8192,
        psd=aLIGOZeroDetHighPower,
        asd=None,
        flow=20.0,
    ):
        # get the number of injections (use injection times, tcs)
        if isinstance(tcs, float):
            tcs = np.asarray([tcs])
        self.ninj = len(tcs)

        # set up a FieldArray to contain required ring-down parameters
        self.__injections = FieldArray(
            self.ninj,
            dtype=[
                ("approximant", "S20"),
                ("f_220", "<f8"),
                ("lmns", "S3"),
                ("tau_220", "<f8"),
                ("amp220", "<f8"),
                ("phi220", "<f8"),
                ("polarization", "<f8"),
                ("inclination", "<f8"),
                ("ra", "<f8"),
                ("dec", "<f8"),
                ("tc", "<f8"),
            ],
        )

        pnames = [
            "f_220",
            "tau_220",
            "amp220",
            "phi220",
            "ra",
            "dec",
            "inclination",
            "polarization",
            "tc",
        ]

        for param, pvalue in zip(
            pnames,
            [freqs, taus, amps, phis, ras, decs, inclinations, psis, tcs],
        ):
            if isinstance(pvalue, float):
                self.__injections[param] = np.full(self.ninj, pvalue)
            elif isinstance(pvalue, (list, np.ndarray)):
                if len(pvalue) != self.ninj:
                    raise ValueError(
                        "{} must have the same number of entries as 'tc'".format(param)
                    )

                self.__injections[param] = pvalue
            else:
                raise TypeError("Input must be a float or list")

        # the PyCBC "TdQNMfromFreqTau" approximant creates time-domain ring-down signals
        self.__injections["approximant"] = np.full(self.ninj, "TdQNMfromFreqTau")
        self.__injections["lmns"] = np.full(
            self.ninj, "221"
        )  # use 1 22 mode (the 220 mode)

        # create the injections
        self.create_injections()

        # create the simulated data if requested
        if detector is not None:
            self.create_data(
                detector, starttime, duration, deltat, psd, asd=asd, flow=flow
            )

            # inject signal(s) into the data
            self.inject()
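A hypothetical instantiation; the class name RingdownInjections is an assumption, since the snippet only shows __init__:

inj = RingdownInjections(
    tcs=1000000000.5, freqs=250.0, amps=1e-21, taus=0.005,
    ras=1.0, decs=-0.5, detector='H1',
)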
Example #21
def check_coinc_results():
    coinc_fail = False
    # gather coincident triggers
    coinc_trig_paths = sorted(glob.glob('output/coinc*.xml.gz'))
    n_coincs = len(coinc_trig_paths)
    if n_coincs == 0:
        log.error('No coincident triggers detected')
        coinc_fail = True
    elif n_coincs >= 10:
        log.error('Too many coincident triggers detected')
        coinc_fail = True
    else:
        log.info('%d coincident trigger(s) detected', n_coincs)

    injs = sorted(glob.glob('test_inj*.hdf'))
    n_injs = len(injs)
    inj_mass1 = np.empty(n_injs)
    inj_mass2 = np.empty(n_injs)
    inj_spin1z = np.empty(n_injs)
    inj_spin2z = np.empty(n_injs)
    inj_time = np.empty(n_injs)

    for idx, inj_path in enumerate(injs):
        with h5py.File(inj_path, 'r') as inj:
            inj_mass1[idx] = inj['mass1'][0]
            inj_mass2[idx] = inj['mass2'][0]
            inj_spin1z[idx] = inj['spin1z'][0]
            inj_spin2z[idx] = inj['spin2z'][0]
            inj_time[idx] = inj['tc'][0]

    if n_injs > n_coincs:
        log.error('More injections than coincident triggers')
        coinc_fail = True

    # create field array to store properties of triggers
    dtype = [('mass1', float), ('mass2', float), ('spin1z', float),
             ('spin2z', float), ('tc', float), ('net_snr', float)]
    trig_props = FieldArray(n_coincs, dtype=dtype)

    # store properties of coincident triggers
    for x, ctrigfp in enumerate(coinc_trig_paths):
        log.info('Checking trigger %s', ctrigfp)
        xmldoc = ligolw_utils.load_filename(
            ctrigfp, False, contenthandler=LIGOLWContentHandler)
        sngl_inspiral_table = lsctables.SnglInspiralTable.get_table(xmldoc)

        trig_props['tc'][x] = sngl_inspiral_table.get_column('end_time')[0]
        trig_props['mass1'][x] = sngl_inspiral_table.get_column('mass1')[0]
        trig_props['mass2'][x] = sngl_inspiral_table.get_column('mass2')[0]
        trig_props['spin1z'][x] = sngl_inspiral_table.get_column('spin1z')[0]
        trig_props['spin2z'][x] = sngl_inspiral_table.get_column('spin2z')[0]

        snr_list = sngl_inspiral_table.get_column('snr')
        trig_props['net_snr'][x] = sum(snr_list**2)**0.5

        log.info('IFO SNRs: %s', snr_list)
        log.info('Network SNR: %f', trig_props['net_snr'][x])
        log.info('IFO End Time: %f', trig_props['tc'][x])
        log.info('Mass 1: %f', trig_props['mass1'][x])
        log.info('Mass 2: %f', trig_props['mass2'][x])
        log.info('Spin1z: %f', trig_props['spin1z'][x])
        log.info('Spin2z: %f', trig_props['spin2z'][x])

    # check if injections match trigger params
    for i in range(n_injs):
        has_match = False
        for j in range(n_coincs):
            if (close(inj_time[i], trig_props['tc'][j], 1.0)
                    and close(inj_mass1[i], trig_props['mass1'][j], 5e-7)
                    and close(inj_mass2[i], trig_props['mass2'][j], 5e-7)
                    and close(inj_spin1z[i], trig_props['spin1z'][j], 5e-7)
                    and close(inj_spin2z[i], trig_props['spin2z'][j], 5e-7)
                    and close(15.0, trig_props['net_snr'][j], 1.0)):
                has_match = True
                break

        if not has_match:
            coinc_fail = True
            log.error('Injection %i has no match', i)

    if coinc_fail:
        log.error('Coincident Trigger Test Failed')
    return coinc_fail
Example #22
    def compute_acfs(cls, fp, start_index=None, end_index=None,
                     per_walker=False, walkers=None, parameters=None):
        """Computes the autocorrleation function of the variable args in the
        given file.

        By default, parameter values are averaged over all walkers at each
        iteration. The ACF is then calculated over the averaged chain. An
        ACF per-walker will be returned instead if ``per_walker=True``.

        Parameters
        ----------
        fp : InferenceFile
            An open file handler to read the samples from.
        start_index : {None, int}
            The start index to compute the ACF from. If None, will try to use
            the number of burn-in iterations in the file; otherwise, will start
            at the first sample.
        end_index : {None, int}
            The end index to compute the ACF to. If None, will go to the end
            of the current iteration.
        per_walker : optional, bool
            Return the ACF for each walker separately. Default is False.
        walkers : optional, int or array
            Calculate the ACF using only the given walkers. If None (the
            default) all walkers will be used.
        parameters : optional, str or array
            Calculate the ACF for only the given parameters. If None (the
            default) will calculate the ACF for all of the variable args.

        Returns
        -------
        FieldArray
            A ``FieldArray`` of the ACF vs iteration for each parameter. If
            ``per_walker`` is True, the FieldArray will have shape
            ``nwalkers x niterations``.
        """
        acfs = {}
        if parameters is None:
            parameters = fp.variable_args
        if isinstance(parameters, (str, unicode)):
            parameters = [parameters]
        for param in parameters:
            if per_walker:
                # just call myself with a single walker
                if walkers is None:
                    walkers = numpy.arange(fp.nwalkers)
                arrays = [cls.compute_acfs(fp, start_index=start_index,
                                           end_index=end_index,
                                           per_walker=False, walkers=ii,
                                           parameters=param)[param]
                          for ii in walkers]
                acfs[param] = numpy.vstack(arrays)
            else:
                samples = cls.read_samples(fp, param,
                                           thin_start=start_index,
                                           thin_interval=1, thin_end=end_index,
                                           walkers=walkers,
                                           flatten=False)[param]
                samples = samples.mean(axis=0)
                acfs[param] = autocorrelation.calculate_acf(samples).numpy()
        return FieldArray.from_kwargs(**acfs)
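
A hedged usage sketch of the method above, assuming an open ``InferenceFile``
and a sampler class that exposes this classmethod (the class name, file name,
and parameter are hypothetical):

fp = InferenceFile('inference.hdf', 'r')

# default: average over walkers at each iteration, then compute one
# ACF per variable arg
acfs = MCMCSampler.compute_acfs(fp)

# per-walker ACFs for a single parameter; the result for that field
# has shape nwalkers x niterations
walker_acfs = MCMCSampler.compute_acfs(fp, per_walker=True,
                                       parameters='mass1')
print(walker_acfs['mass1'].shape)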
Exemplo n.º 33
0
class TemplateBank(object):
    """ Class to provide some basic helper functions and information
    about elements of an xml template bank.
    """
    def __init__(self, filename, approximant=None, **kwds):
        ext = os.path.basename(filename)
        if 'xml' in ext:
            self.indoc = ligolw_utils.load_filename(
                filename, False, contenthandler=LIGOLWContentHandler)
            self.table = table.get_table(
                self.indoc, lsctables.SnglInspiralTable.tableName)
            self.table = FieldArray.from_ligolw_table(self.table)

            # inclination stored in xml alpha3 column
            names = list(self.table.dtype.names)
            names = tuple([n if n != 'alpha3' else 'inclination' for n in names])
            self.table.dtype.names = names

        elif 'hdf' in ext:
            f = h5py.File(filename, 'r')
            dtype = []
            data = {}
            for key in f.keys():
                try:
                    data[str(key)] = f[key][:]
                    dtype.append((str(key), data[key].dtype))
                except Exception:
                    # skip entries (e.g. groups) that cannot be read as arrays
                    continue

            num = len(data[list(data.keys())[0]])
            self.table = FieldArray(num, dtype=dtype)
            for key in data:
                self.table[key] = data[key]
        else:
            raise ValueError("Unsupported template bank file extension %s" % ext)

        if not hasattr(self.table, 'template_duration'):
            self.table = self.table.add_fields(numpy.zeros(len(self.table),
                                     dtype=numpy.float32), 'template_duration')
        self.extra_args = kwds
        self.approximant_str = approximant

    @staticmethod
    def parse_option(row, arg):
        safe_dict = {}
        safe_dict.update(row.__dict__)
        safe_dict.update(math.__dict__)
        return eval(arg, {"__builtins__":None}, safe_dict)

    def end_frequency(self, index):
        """ Return the end frequency of the waveform at the given index value
        """
        return pycbc.waveform.get_waveform_end_frequency(
            self.table[index],
            approximant=self.approximant(index),
            **self.extra_args)

    def approximant(self, index):
        """ Return the name of the approximant to use at the given index
        """
        if self.approximant_str is not None:
            if 'params' in self.approximant_str:
                t = type('t', (object,), {'params': self.table[index]})
                approximant = str(self.parse_option(t, self.approximant_str))
            else:
                approximant = self.approximant_str
        else:
            raise ValueError("Reading approximant from template bank not yet supported")

        return approximant

    def __len__(self):
        return len(self.table)
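
A short usage sketch for the class above. The bank file name is hypothetical,
and the conditional approximant string is one plausible form of the
``params``-based expressions that ``parse_option`` evaluates (it assumes the
bank provides a ``mass1`` column):

# parse_option evaluates the string with the row's fields available as
# ``params`` and with builtins disabled, so only simple expressions work
bank = TemplateBank(
    'H1L1-BANK.hdf',
    approximant="'SEOBNRv4' if params.mass1 > 10 else 'TaylorF2'")

print(len(bank))            # number of templates in the bank
print(bank.approximant(0))  # approximant selected for the first template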
Exemplo n.º 34
0
    def compute_acfs(cls,
                     fp,
                     start_index=None,
                     end_index=None,
                     per_walker=False,
                     walkers=None,
                     parameters=None,
                     temps=None):
        """Computes the autocorrleation function of the variable args in the
        given file.

        By default, parameter values are averaged over all walkers at each
        iteration. The ACF is then calculated over the averaged chain for each
        temperature. An ACF per-walker will be returned instead if
        ``per_walker=True``.

        Parameters
        ----------
        fp : InferenceFile
            An open file handler to read the samples from.
        start_index : {None, int}
            The start index to compute the ACF from. If None, will try to use
            the number of burn-in iterations in the file; otherwise, will start
            at the first sample.
        end_index : {None, int}
            The end index to compute the ACF to. If None, will go to the end
            of the current iteration.
        per_walker : optional, bool
            Return the ACF for each walker separately. Default is False.
        walkers : optional, int or array
            Calculate the ACF using only the given walkers. If None (the
            default) all walkers will be used.
        parameters : optional, str or array
            Calculate the ACF for only the given parameters. If None (the
            default) will calculate the ACF for all of the variable args.
        temps : optional, (list of) int or 'all'
            The temperature index (or list of indices) to retrieve. If None
            (the default), the ACF will only be computed for the coldest (= 0)
            temperature chain. To compute an ACF for all temperatures pass 'all',
            or a list of all of the temperatures.

        Returns
        -------
        FieldArray
            A ``FieldArray`` of the ACF vs iteration for each parameter. If
            ``per_walker`` is True, the FieldArray will have shape
            ``ntemps x nwalkers x niterations``. Otherwise, the returned
            array will have shape ``ntemps x niterations``.
        """
        acfs = {}
        if parameters is None:
            parameters = fp.variable_args
        if isinstance(parameters, (str, unicode)):
            parameters = [parameters]
        if isinstance(temps, int):
            temps = [temps]
        elif temps == 'all':
            temps = numpy.arange(fp.ntemps)
        elif temps is None:
            temps = [0]
        for param in parameters:
            subacfs = []
            for tk in temps:
                if per_walker:
                    # just call myself with a single walker
                    if walkers is None:
                        walkers = numpy.arange(fp.nwalkers)
                    arrays = [
                        cls.compute_acfs(fp,
                                         start_index=start_index,
                                         end_index=end_index,
                                         per_walker=False,
                                         walkers=ii,
                                         parameters=param,
                                         temps=tk)[param][0, :]
                        for ii in walkers
                    ]
                    # we'll stack all of the walker arrays to make a single
                    # nwalkers x niterations array; when these are stacked
                    # below, we'll get a ntemps x nwalkers x niterations array
                    subacfs.append(numpy.vstack(arrays))
                else:
                    samples = cls.read_samples(fp,
                                               param,
                                               thin_start=start_index,
                                               thin_interval=1,
                                               thin_end=end_index,
                                               walkers=walkers,
                                               temps=tk,
                                               flatten=False)[param]
                    # contract the walker dimension using the mean, and flatten
                    # the (length 1) temp dimension
                    samples = samples.mean(axis=1)[0, :]
                    thisacf = autocorrelation.calculate_acf(samples).numpy()
                    subacfs.append(thisacf)
            # stack the temperatures
            # FIXME: the following if/else can be condensed to a single line
            # using numpy.stack, once the version requirements are bumped to
            # numpy >= 1.10
            if per_walker:
                nw, ni = subacfs[0].shape
                acfs[param] = numpy.zeros((len(temps), nw, ni), dtype=float)
                for tk in range(len(temps)):
                    acfs[param][tk, ...] = subacfs[tk]
            else:
                acfs[param] = numpy.vstack(subacfs)
        return FieldArray.from_kwargs(**acfs)
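
As the FIXME above notes, once numpy >= 1.10 can be required, the if/else
that stacks the temperatures collapses to a single call; a sketch of the
condensed ending, reusing the ``subacfs`` list built in the loop:

# numpy.stack joins the arrays along a new leading (temperature) axis,
# giving ntemps x nwalkers x niterations when per_walker is True and
# ntemps x niterations otherwise
acfs[param] = numpy.stack(subacfs)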
Exemplo n.º 35
0
    def compute_acfs(cls, fp, start_index=None, end_index=None,
                     per_walker=False, walkers=None, parameters=None,
                     temps=None):
        """Computes the autocorrleation function of the variable args in the
        given file.

        By default, parameter values are averaged over all walkers at each
        iteration. The ACF is then calculated over the averaged chain for each
        temperature. An ACF per-walker will be returned instead if
        ``per_walker=True``.

        Parameters
        ----------
        fp : InferenceFile
            An open file handler to read the samples from.
        start_index : {None, int}
            The start index to compute the ACF from. If None, will try to use
            the number of burn-in iterations in the file; otherwise, will start
            at the first sample.
        end_index : {None, int}
            The end index to compute the ACF to. If None, will go to the end
            of the current iteration.
        per_walker : optional, bool
            Return the ACF for each walker separately. Default is False.
        walkers : optional, int or array
            Calculate the ACF using only the given walkers. If None (the
            default) all walkers will be used.
        parameters : optional, str or array
            Calculate the ACF for only the given parameters. If None (the
            default) will calculate the ACF for all of the variable args.
        temps : optional, (list of) int or 'all'
            The temperature index (or list of indices) to retrieve. If None
            (the default), the ACF will only be computed for the coldest (= 0)
            temperature chain. To compute an ACF for all temperatures pass 'all',
            or a list of all of the temperatures.

        Returns
        -------
        FieldArray
            A ``FieldArray`` of the ACF vs iteration for each parameter. If
            ``per_walker`` is True, the FieldArray will have shape
            ``ntemps x nwalkers x niterations``. Otherwise, the returned
            array will have shape ``ntemps x niterations``.
        """
        acfs = {}
        if parameters is None:
            parameters = fp.variable_args
        if isinstance(parameters, (str, unicode)):
            parameters = [parameters]
        if isinstance(temps, int):
            temps = [temps]
        elif temps == 'all':
            temps = numpy.arange(fp.ntemps)
        elif temps is None:
            temps = [0]
        for param in parameters:
            subacfs = []
            for tk in temps:
                if per_walker:
                    # just call myself with a single walker
                    if walkers is None:
                        walkers = numpy.arange(fp.nwalkers)
                    arrays = [cls.compute_acfs(fp, start_index=start_index,
                                               end_index=end_index,
                                               per_walker=False, walkers=ii,
                                               parameters=param,
                                               temps=tk)[param][0,:]
                              for ii in walkers]
                    # we'll stack all of the walker arrays to make a single
                    # nwalkers x niterations array; when these are stacked
                    # below, we'll get a ntemps x nwalkers x niterations array
                    subacfs.append(numpy.vstack(arrays))
                else:
                    samples = cls.read_samples(fp, param,
                                               thin_start=start_index,
                                               thin_interval=1,
                                               thin_end=end_index,
                                               walkers=walkers, temps=tk,
                                               flatten=False)[param]
                    # contract the walker dimension using the mean, and flatten
                    # the (length 1) temp dimension
                    samples = samples.mean(axis=1)[0,:]
                    thisacf = autocorrelation.calculate_acf(samples).numpy()
                    subacfs.append(thisacf)
            # stack the temperatures
            # FIXME: the following if/else can be condensed to a single line
            # using numpy.stack, once the version requirements are bumped to
            # numpy >= 1.10
            if per_walker:
                nw, ni = subacfs[0].shape
                acfs[param] = numpy.zeros((len(temps), nw, ni), dtype=float)
                for tk in range(len(temps)):
                    acfs[param][tk,...] = subacfs[tk]
            else:
                acfs[param] = numpy.vstack(subacfs)
        return FieldArray.from_kwargs(**acfs)
Exemplo n.º 36
0
def check_coinc_results(args):
    coinc_fail = False

    # read injections
    with h5py.File(args.injections, 'r') as injfile:
        inj_mass1 = injfile['mass1'][:]
        inj_mass2 = injfile['mass2'][:]
        inj_spin1z = injfile['spin1z'][:]
        inj_spin2z = injfile['spin2z'][:]
        inj_time = injfile['tc'][:]

    # gather coincident triggers
    coinc_trig_paths = sorted(glob.glob('output/coinc*.xml.gz'))
    n_coincs = len(coinc_trig_paths)
    if n_coincs == 0:
        log.error('No coincident triggers detected')
        coinc_fail = True
    elif n_coincs >= 10:
        log.error('Too many coincident triggers detected')
        coinc_fail = True
    else:
        log.info('%d coincident trigger(s) detected', n_coincs)

    # create field array to store properties of triggers
    dtype = [('mass1', float), ('mass2', float),
             ('spin1z', float), ('spin2z', float),
             ('tc', float), ('net_snr', float)]
    trig_props = FieldArray(n_coincs, dtype=dtype)

    # store properties of coincident triggers
    for x, ctrigfp in enumerate(coinc_trig_paths):
        log.info('Checking trigger %s', ctrigfp)
        xmldoc = load_xml_doc(
            ctrigfp, False, contenthandler=LIGOLWContentHandler)
        si_table = lsctables.SnglInspiralTable.get_table(xmldoc)

        trig_props['tc'][x] = si_table[0].end
        trig_props['mass1'][x] = si_table[0].mass1
        trig_props['mass2'][x] = si_table[0].mass2
        trig_props['spin1z'][x] = si_table[0].spin1z
        trig_props['spin2z'][x] = si_table[0].spin2z

        snr_list = si_table.getColumnByName('snr').asarray()
        trig_props['net_snr'][x] = sum(snr_list ** 2) ** 0.5

        log.info('Single-detector SNRs: %s', snr_list)
        log.info('Network SNR: %f', trig_props['net_snr'][x])
        log.info('Merger time: %f', trig_props['tc'][x])
        log.info('Mass 1: %f', trig_props['mass1'][x])
        log.info('Mass 2: %f', trig_props['mass2'][x])
        log.info('Spin1z: %f', trig_props['spin1z'][x])
        log.info('Spin2z: %f', trig_props['spin2z'][x])

    # check if injections match trigger params
    for i in range(len(inj_mass1)):
        has_match = False
        for j in range(n_coincs):
            # FIXME should calculate the optimal SNRs of the injections
            # and use those for checking net_snr
            if (close(inj_time[i], trig_props['tc'][j], 1.0)
                    and close(inj_mass1[i], trig_props['mass1'][j], 1e-5)
                    and close(inj_mass2[i], trig_props['mass2'][j], 1e-5)
                    and close(inj_spin1z[i], trig_props['spin1z'][j], 1e-5)
                    and close(inj_spin2z[i], trig_props['spin2z'][j], 1e-5)
                    and close(15.0, trig_props['net_snr'][j], 2.0)):
                has_match = True
                break

        if not has_match:
            coinc_fail = True
            log.error('Injection %i was missed', i)

    if coinc_fail:
        log.error('Coincident Trigger Test Failed')
    return coinc_fail
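
The FIXME in the matching loop above flags the hard-coded ``15.0`` target: a
more robust test would compare against each injection's optimal SNR. A hedged
sketch of how that could be computed with pycbc, for the first injection and
a single detector; the approximant, frequency settings, and model PSD are
illustrative assumptions:

from pycbc.waveform import get_fd_waveform
from pycbc.filter import sigma
from pycbc.psd import aLIGOZeroDetHighPower

f_low = 20.0       # illustrative low-frequency cutoff
delta_f = 1.0 / 16

# frequency-domain waveform built from the injection parameters
hp, _ = get_fd_waveform(approximant='TaylorF2',
                        mass1=inj_mass1[0], mass2=inj_mass2[0],
                        spin1z=inj_spin1z[0], spin2z=inj_spin2z[0],
                        delta_f=delta_f, f_lower=f_low)

# model PSD sampled at the same frequency resolution as the waveform
psd = aLIGOZeroDetHighPower(len(hp), delta_f, f_low)

# optimal SNR in one detector, ignoring antenna response; the network
# value would be the quadrature sum over detectors
opt_snr = sigma(hp, psd=psd, low_frequency_cutoff=f_low)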