Example 1
def __getattr__(name):
    if name == 'tensorlib':
        return get_backend(default=False)[0]
    if name == 'optimizer':
        return get_backend(default=False)[1]
    if name == 'default_backend':
        return get_backend(default=True)[0]
    raise AttributeError(name)
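This is the module-level __getattr__ hook from PEP 562: attributes such as pyhf.tensorlib are resolved against the current backend at access time instead of being cached at import. A minimal sketch of the same pattern, with a hypothetical module name and backend registry:

    # lazy_config.py -- hypothetical module showing PEP 562 lazy attributes
    _BACKENDS = {'tensorlib': 'numpy-backend', 'optimizer': 'scipy-optimizer'}

    def __getattr__(name):
        # invoked only when normal module attribute lookup fails
        if name in _BACKENDS:
            return _BACKENDS[name]
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    # usage from another module:
    #   import lazy_config
    #   lazy_config.tensorlib  -> 'numpy-backend', resolved at access time
    #   lazy_config.missing    -> AttributeError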
Example 2
 def __call__(self, alphasets):
     tensorlib, _ = get_backend()
     return tensorlib.astensor(
         _slow_interpolator_looper(
             self._histogramssets, tensorlib.tolist(alphasets), self.product
         )
     )
Example 3
 def _precompute(self):
     if not self.param_viewer.index_selection:
         return
     tensorlib, _ = get_backend()
     self.sigmas = tensorlib.astensor(self._sigmas)
     self.normal_data = tensorlib.astensor(self._normal_data, dtype='int')
     self.access_field = tensorlib.astensor(self._access_field, dtype='int')
Example 4
    def make_pdf(self, pars):
        """
        Args:
            pars (:obj:`tensor`): The model parameters

        Returns:
            pdf: The pdf object for the Poisson Constraint
        """
        if not self.param_viewer.index_selection:
            return None
        tensorlib, _ = get_backend()
        if self.batch_size is None:
            flat_pars = pars
        else:
            flat_pars = tensorlib.reshape(pars, (-1, ))
        nuispars = tensorlib.gather(flat_pars, self.access_field)

        # similar to expected_data() in constrained_by_poisson
        # we multiply by the appropriate factor to achieve
        # the desired variance for poisson-type constraints
        pois_rates = tensorlib.product(tensorlib.stack(
            [nuispars, self.batched_factors]),
                                       axis=0)
        if self.batch_size is None:
            pois_rates = pois_rates[0]
        # pdf pars are done, now get data and compute
        return prob.Independent(prob.Poisson(pois_rates),
                                batch_size=self.batch_size)
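The comment about achieving the desired variance refers to the standard HistFactory device for Poisson-type constraints: a factor gamma with relative uncertainty delta is constrained through Pois(n | gamma * tau) with tau = 1 / delta**2, so the constraint's relative width comes out as delta. A quick numpy check of that relationship (the numbers are made up; pyhf computes the factors in the modifier code):

    import numpy as np

    delta = 0.1                    # desired relative uncertainty on gamma
    tau = 1.0 / delta**2           # variance-matching factor, here 100
    rng = np.random.default_rng(0)
    samples = rng.poisson(lam=1.0 * tau, size=100_000)
    gamma_hat = samples / tau      # implied estimates of gamma = 1
    print(gamma_hat.std())         # ~0.1, i.e. ~delta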
Example 5
 def _precompute(self):
     if not self.param_viewer.index_selection:
         return
     tensorlib, _ = get_backend()
     self.poisson_data = tensorlib.astensor(self._poisson_data, dtype='int')
     self.access_field = tensorlib.astensor(self._access_field, dtype='int')
     self.batched_factors = tensorlib.astensor(self._batched_factors)
Example 6
    def make_pdf(self, pars):
        """
        Args:
            pars (:obj:`tensor`): The model parameters

        Returns:
            pdf: The pdf object for the Normal Constraint
        """
        tensorlib, _ = get_backend()
        if not self.param_viewer.index_selection:
            return None
        if self.batch_size is None:
            flat_pars = pars
        else:
            flat_pars = tensorlib.reshape(pars, (-1, ))

        normal_means = tensorlib.gather(flat_pars, self.access_field)

        # pdf pars are done, now get data and compute
        if self.batch_size is None:
            normal_means = normal_means[0]

        result = prob.Independent(prob.Normal(normal_means, self.sigmas),
                                  batch_size=self.batch_size)
        return result
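prob.Independent treats the components along the last axis of the batched Normal as independent, so its log_prob sums the per-component log densities into one joint value. A sketch of that semantics using scipy for illustration (scipy is an assumption here; pyhf.probability delegates to the active backend's distributions):

    import numpy as np
    from scipy.stats import norm

    means = np.array([1.0, 2.0])
    sigmas = np.array([0.5, 0.5])
    x = np.array([1.1, 1.9])

    # Independent(Normal(means, sigmas)).log_prob(x) amounts to summing
    # the per-component log pdfs into a single joint log density
    print(norm.logpdf(x, loc=means, scale=sigmas).sum())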
Example 7
 def _precompute(self):
     tensorlib, _ = get_backend()
     self.deltas_up = tensorlib.astensor(self._deltas_up)
     self.deltas_dn = tensorlib.astensor(self._deltas_dn)
     self.broadcast_helper = tensorlib.astensor(self._broadcast_helper)
     self.mask_on = tensorlib.ones(self.alphasets_shape)
     self.mask_off = tensorlib.zeros(self.alphasets_shape)
Example 8
def _get_tensor_shim():
    """
    A shim-retriever to lazy-retrieve the necessary shims as needed.

    Because pyhf.tensor is a lazy-retriever for the backends, we can be sure
    that tensorlib is imported correctly.
    """
    tensorlib, _ = get_backend()
    if tensorlib.name == 'numpy':
        from pyhf.optimize.opt_numpy import wrap_objective as numpy_shim

        return numpy_shim

    if tensorlib.name == 'tensorflow':
        from pyhf.optimize.opt_tflow import wrap_objective as tflow_shim

        return tflow_shim

    if tensorlib.name == 'pytorch':
        from pyhf.optimize.opt_pytorch import wrap_objective as pytorch_shim

        return pytorch_shim

    if tensorlib.name == 'jax':
        from pyhf.optimize.opt_jax import wrap_objective as jax_shim

        return jax_shim
    raise ValueError(f'No optimizer shim for {tensorlib.name}.')
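A usage sketch: set a backend, then retrieve the matching shim. This assumes _get_tensor_shim lives in pyhf.optimize.common, as in recent pyhf releases; the import path may differ across versions:

    import pyhf
    from pyhf.optimize.common import _get_tensor_shim

    pyhf.set_backend("numpy")
    shim = _get_tensor_shim()  # resolves to opt_numpy.wrap_objective
    print(shim.__module__)     # 'pyhf.optimize.opt_numpy'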
Example 9
 def _precompute_alphasets(self, alphasets_shape):
     if alphasets_shape == self.alphasets_shape:
         return
     tensorlib, _ = get_backend()
     self.alphasets_shape = alphasets_shape
     self.mask_on = tensorlib.ones(self.alphasets_shape)
     self.mask_off = tensorlib.zeros(self.alphasets_shape)
Example 10
    def __call__(self, alphasets):
        """Compute Interpolated Values."""
        tensorlib, _ = get_backend()
        self._precompute_alphasets(tensorlib.shape(alphasets))

        # select where alpha >= alpha0 and produce the mask
        where_alphasets_gtalpha0 = tensorlib.where(alphasets >= self.alpha0,
                                                   self.mask_on, self.mask_off)
        masks_gtalpha0 = tensorlib.astensor(
            tensorlib.einsum('sa,shb->shab', where_alphasets_gtalpha0,
                             self.broadcast_helper),
            dtype="bool",
        )

        # select where alpha > -alpha0 ["not(alpha <= -alpha0)"] and produce the mask
        where_alphasets_not_ltalpha0 = tensorlib.where(
            alphasets > -self.alpha0, self.mask_on, self.mask_off)
        masks_not_ltalpha0 = tensorlib.astensor(
            tensorlib.einsum('sa,shb->shab', where_alphasets_not_ltalpha0,
                             self.broadcast_helper),
            dtype="bool",
        )

        # s: set under consideration (i.e. the modifier)
        # a: alpha variation
        # h: histogram affected by modifier
        # b: bin of histogram
        exponents = tensorlib.einsum('sa,shb->shab', tensorlib.abs(alphasets),
                                     self.broadcast_helper)
        # for |alpha| >= alpha0, we want to raise the bases to the exponent=alpha
        # and for |alpha| < alpha0, we want to raise the bases to the exponent=1
        masked_exponents = tensorlib.where(exponents >= self.alpha0,
                                           exponents, self.ones)
        # we need to produce the terms of alpha^i for summing up
        alphasets_powers = tensorlib.stack([
            alphasets,
            tensorlib.power(alphasets, 2),
            tensorlib.power(alphasets, 3),
            tensorlib.power(alphasets, 4),
            tensorlib.power(alphasets, 5),
            tensorlib.power(alphasets, 6),
        ])
        # this is the 1 + sum_i a_i alpha^i
        value_btwn = tensorlib.ones(exponents.shape) + tensorlib.einsum(
            'rshb,rsa->shab', self.coefficients, alphasets_powers)

        # first, build a result where:
        #       alpha > alpha0   : fill with bases_up
        #   not(alpha > alpha0)  : fill with 1 + sum(a_i alpha^i)
        results_gtalpha0_btwn = tensorlib.where(masks_gtalpha0, self.bases_up,
                                                value_btwn)
        # then, build a result where:
        #      alpha >= -alpha0  : do nothing (fill with previous result)
        #   not(alpha >= -alpha0): fill with bases_dn
        bases = tensorlib.where(masks_not_ltalpha0, results_gtalpha0_btwn,
                                self.bases_dn)
        return tensorlib.power(bases, masked_exponents)
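The flow above selects one of three regimes per element (alpha >= alpha0, alpha <= -alpha0, or in between) via boolean masks, then raises the selected bases to the selected exponents in a single power call. A simplified numpy analogue of that select-then-power pattern (the real central regime uses the degree-six polynomial, not a constant base):

    import numpy as np

    alpha = np.array([-2.0, 0.5, 2.0])
    base_up, base_dn, base_mid = 1.2, 0.8, 1.05   # made-up bases

    bases = np.where(alpha >= 1.0, base_up,
                     np.where(alpha <= -1.0, base_dn, base_mid))
    # exponent is |alpha| outside [-1, 1] and 1 inside, as in masked_exponents
    exponents = np.where(np.abs(alpha) >= 1.0, np.abs(alpha), 1.0)
    print(bases ** exponents)   # one regime applied per element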
Example 11
 def _precompute(self):
     tensorlib, _ = get_backend()
     self.a = tensorlib.astensor(self._a)
     self.b = tensorlib.astensor(self._b)
     self.b_plus_2a = tensorlib.astensor(self._b_plus_2a)
     self.b_minus_2a = tensorlib.astensor(self._b_minus_2a)
     # make up the masks correctly
     self.broadcast_helper = tensorlib.astensor(self._broadcast_helper)
     self.mask_on = tensorlib.ones(self.alphasets_shape)
     self.mask_off = tensorlib.zeros(self.alphasets_shape)
Example 12
 def _precompute(self):
     tensorlib, _ = get_backend()
     self.sorted_indices = tensorlib.astensor(self._sorted_indices,
                                              dtype='int')
     self.partition_indices = [
         tensorlib.astensor(idx, dtype='int')
         for idx in self._partition_indices
     ]
     if self.names:
         self.name_map = dict(zip(self.names, self.partition_indices))
Example 13
 def _precompute(self):
     if not self.param_viewer.index_selection:
         return
     tensorlib, _ = get_backend()
     self.histosys_mask = tensorlib.astensor(self._histosys_mask,
                                             dtype="bool")
     self.histosys_default = tensorlib.zeros(self.histosys_mask.shape)
     if self.batch_size is None:
         self.indices = tensorlib.reshape(
             self.param_viewer.indices_concatenated, (-1, 1))
Example 14
    def _precompute(self):
        tensorlib, _ = get_backend()

        self.all_indices = tensorlib.astensor(self._all_indices)
        (
            self.index_selection,
            self.stitched,
            self.indices_concatenated,
        ) = extract_index_access(self.allpar_viewer, self.selected_viewer,
                                 self.all_indices)
Example 15
    def __call__(self, alphasets):
        """Compute Interpolated Values."""
        tensorlib, _ = get_backend()
        self._precompute_alphasets(tensorlib.shape(alphasets))
        where_alphasets_greater_p1 = tensorlib.where(alphasets > 1,
                                                     self.mask_on,
                                                     self.mask_off)

        where_alphasets_smaller_m1 = tensorlib.where(alphasets < -1,
                                                     self.mask_on,
                                                     self.mask_off)

        # s: set under consideration (i.e. the modifier)
        # a: alpha variation
        # h: histogram affected by modifier
        # b: bin of histogram

        # for a > 1
        alphas_times_deltas_up = tensorlib.einsum('sa,shb->shab', alphasets,
                                                  self.deltas_up)

        # for a < -1
        alphas_times_deltas_dn = tensorlib.einsum('sa,shb->shab', alphasets,
                                                  self.deltas_dn)

        # for |a| < 1
        asquare = tensorlib.power(alphasets, 2)
        tmp1 = asquare * 3.0 - 10.0
        tmp2 = asquare * tmp1 + 15.0
        tmp3 = asquare * tmp2

        tmp3_times_A = tensorlib.einsum('sa,shb->shab', tmp3, self.A)

        alphas_times_S = tensorlib.einsum('sa,shb->shab', alphasets, self.S)

        deltas = tmp3_times_A + alphas_times_S
        # end |a| < 1

        masks_p1 = tensorlib.astensor(
            tensorlib.einsum('sa,shb->shab', where_alphasets_greater_p1,
                             self.broadcast_helper),
            dtype='bool',
        )

        masks_m1 = tensorlib.astensor(
            tensorlib.einsum('sa,shb->shab', where_alphasets_smaller_m1,
                             self.broadcast_helper),
            dtype='bool',
        )

        return tensorlib.where(
            masks_m1,
            alphas_times_deltas_dn,
            tensorlib.where(masks_p1, alphas_times_deltas_up, deltas),
        )
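For |alpha| < 1 the polynomial tmp3 * A + alpha * S is built so that the three pieces join smoothly at alpha = +/-1. A quick numpy check of the boundary values; the definitions of S and A follow the HistFactory code4p convention and are an assumption here, since only deltas_up/deltas_dn appear in the excerpt above:

    import numpy as np

    delta_up, delta_dn = 0.12, 0.08
    S = 0.5 * (delta_up + delta_dn)       # assumed central slope
    A = 0.0625 * (delta_up - delta_dn)    # assumed smoothing amplitude

    def central(alpha):
        asq = alpha**2
        tmp3 = asq * (asq * (asq * 3.0 - 10.0) + 15.0)  # 3a^6 - 10a^4 + 15a^2
        return tmp3 * A + alpha * S

    print(np.isclose(central(1.0), 1.0 * delta_up))     # True: matches a > 1 branch
    print(np.isclose(central(-1.0), -1.0 * delta_dn))   # True: matches a < -1 branch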
Example 16
 def split(self, data, selection=None):
     tensorlib, _ = get_backend()
     indices = (self.partition_indices if selection is None else
                [self.name_map[n] for n in selection])
     if len(tensorlib.shape(data)) == 1:
         return [tensorlib.gather(data, idx) for idx in indices]
     data = tensorlib.einsum('...j->j...', tensorlib.astensor(data))
     return [
         tensorlib.einsum('j...->...j', tensorlib.gather(data, idx))
         for idx in indices
     ]
Example 17
    def apply(self, pars):
        """
        Returns:
            modification tensor: Shape (n_modifiers, n_global_samples, n_alphas, n_global_bin)
        """
        if not self.param_viewer.index_selection:
            return
        tensorlib, _ = get_backend()
        if self.batch_size is None:
            flat_pars = pars
        else:
            flat_pars = tensorlib.reshape(pars, (-1, ))
        shapefactors = tensorlib.gather(flat_pars, self.access_field)
        results_shapesys = tensorlib.einsum('mab,s->msab', shapefactors,
                                            self.sample_ones)

        results_shapesys = tensorlib.where(self.shapesys_mask,
                                           results_shapesys,
                                           self.shapesys_default)
        return results_shapesys
Example 18
    def stitch(self, data):
        tensorlib, _ = get_backend()
        assert len(self.partition_indices) == len(data)

        data = tensorlib.concatenate(data, axis=-1)
        if len(tensorlib.shape(data)) == 1:
            stitched = tensorlib.gather(data, self.sorted_indices)
        else:
            data = tensorlib.einsum('...j->j...', data)
            stitched = tensorlib.gather(data, self.sorted_indices)
            stitched = tensorlib.einsum('j...->...j', stitched)
        return stitched
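split and stitch are inverses: split gathers partitions along the last axis, and stitch concatenates them and restores the original ordering through sorted_indices. A numpy round-trip sketch with made-up partition indices:

    import numpy as np

    data = np.array([10., 11., 12., 13., 14.])
    partitions = [np.array([0, 3]), np.array([1, 2, 4])]  # made up

    split = [data[idx] for idx in partitions]       # gather per partition
    order = np.concatenate(partitions)              # [0, 3, 1, 2, 4]
    sorted_indices = np.argsort(order)              # undoes the reordering
    stitched = np.concatenate(split)[sorted_indices]
    print(np.array_equal(stitched, data))           # True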
Example 19
 def _precompute(self):
     tensorlib, _ = get_backend()
     if not self.param_viewer.index_selection:
         return
     self.shapesys_mask = tensorlib.astensor(self._shapesys_mask,
                                             dtype="bool")
     self.shapesys_mask = tensorlib.tile(self.shapesys_mask,
                                         (1, 1, self.batch_size or 1, 1))
     self.access_field = tensorlib.astensor(self._access_field, dtype='int')
     self.sample_ones = tensorlib.ones(
         tensorlib.shape(self.shapesys_mask)[1])
     self.shapesys_default = tensorlib.ones(
         tensorlib.shape(self.shapesys_mask))
Example 20
    def pdf(self, pars, data):
        """
        Compute the density at a given observed point in data space of the full model.

        Args:
            pars (:obj:`tensor`): The parameter values
            data (:obj:`tensor`): The measurement data

        Returns:
            Tensor: The density value

        """
        tensorlib, _ = get_backend()
        return tensorlib.exp(self.logpdf(pars, data))
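A usage sketch, assuming a pyhf release where the one-channel model builder is named simplemodels.uncorrelated_background (earlier releases called it hepdata_like):

    import pyhf

    model = pyhf.simplemodels.uncorrelated_background(
        signal=[5.0], bkg=[10.0], bkg_uncertainty=[3.0]
    )
    pars = model.config.suggested_init()
    data = [15.0] + model.config.auxdata

    print(model.pdf(pars, data))     # the density, i.e. exp(logpdf)
    print(model.logpdf(pars, data))  # the log density it exponentiates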
Example 21
    def expected_actualdata(self, pars):
        """
        Compute the expected value of the main model.

        Args:
            pars (:obj:`tensor`): The parameter values

        Returns:
            Tensor: The expected data of the main model (no auxiliary data)

        """
        tensorlib, _ = get_backend()
        pars = tensorlib.astensor(pars)
        return self.make_pdf(pars)[0].expected_data()
Example 22
    def expected_auxdata(self, pars):
        """
        Compute the expected value of the auxiliary measurements.

        Args:
            pars (:obj:`tensor`): The parameter values

        Returns:
            Tensor: The expected data of the auxiliary pdf

        """
        tensorlib, _ = get_backend()
        pars = tensorlib.astensor(pars)
        return self.make_pdf(pars)[1].expected_data()
Example 23
    def expected_data(self, pars, return_by_sample=False):
        """
        Compute the expected rates for given values of parameters.

        For a single channel single sample, we compute:

            Pois(d | factor(pars) * (delta(pars) + nom)) * Gaus(a | pars[is_gaus], sigmas) * Pois(a * cfac | pars[is_pois] * cfac)

        where:
            - delta(pars) is the result of an apply(pars) of combined modifiers
              with 'addition' op_code
            - factor(pars) is the result of apply(pars) of combined modifiers
              with 'multiplication' op_code
            - pars[is_gaus] are the subset of parameters that are constrained
              by Gaussians (with sigmas accordingly, some of which are computed
              by modifiers)
            - pars[is_pois] are the Poisson-constrained parameters and their
              rates (they come with their own additional factors, unrelated to
              factor(pars), which are also computed by the finalize() of the
              modifier)

        So in the end we only make three calls to pdfs:

            1. The pdf of data and modified rates
            2. All Gaussian constraints as one call
            3. All Poisson constraints as one call

        """
        tensorlib, _ = get_backend()
        pars = tensorlib.astensor(pars)
        deltas, factors = self._modifications(pars)

        allsum = tensorlib.concatenate(deltas + [self.nominal_rates])

        nom_plus_delta = tensorlib.sum(allsum, axis=0)
        nom_plus_delta = tensorlib.reshape(nom_plus_delta, (1, ) +
                                           tensorlib.shape(nom_plus_delta))

        allfac = tensorlib.concatenate(factors + [nom_plus_delta])

        newbysample = tensorlib.product(allfac, axis=0)
        if return_by_sample:
            batch_first = tensorlib.einsum('ij...->ji...', newbysample)
            if self.batch_size is None:
                return batch_first[0]
            return batch_first

        newresults = tensorlib.sum(newbysample, axis=0)
        if self.batch_size is None:
            return newresults[0]
        return newresults
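The combination order is: sum the additive modifications with the nominal rates, multiply in the multiplicative modifications, then sum over samples per bin. A plain numpy sketch with made-up numbers:

    import numpy as np

    nominal = np.array([[50., 60.], [5., 4.]])      # (sample, bin)
    deltas = [np.array([[2., -1.], [0., 0.]])]      # additive modifications
    factors = [np.array([[1.1, 1.1], [1.0, 1.0]])]  # multiplicative ones

    nom_plus_delta = np.sum([nominal] + deltas, axis=0)
    by_sample = np.prod(factors + [nom_plus_delta], axis=0)
    print(by_sample.sum(axis=0))    # expected counts per bin: [62.2, 68.9]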
Example 24
 def _precompute_alphasets(self, alphasets_shape):
     if alphasets_shape == self.alphasets_shape:
         return
     tensorlib, _ = get_backend()
     self.alphasets_shape = alphasets_shape
     self.bases_up = tensorlib.einsum(
         'sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_up
     )
     self.bases_dn = tensorlib.einsum(
         'sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_dn
     )
     self.mask_on = tensorlib.ones(self.alphasets_shape)
     self.mask_off = tensorlib.zeros(self.alphasets_shape)
Example 25
    def apply(self, pars):
        if not self.param_viewer.index_selection:
            return

        tensorlib, _ = get_backend()
        if self.batch_size is None:
            flat_pars = pars
        else:
            flat_pars = tensorlib.reshape(pars, (-1, ))
        statfactors = tensorlib.gather(flat_pars, self.access_field)
        results_staterr = tensorlib.einsum('mab,s->msab', statfactors,
                                           self.sample_ones)
        results_staterr = tensorlib.where(self.staterror_mask, results_staterr,
                                          self.staterror_default)
        return results_staterr
Example 26
    def logpdf(self, auxdata, pars):
        """
        Args:
            auxdata (:obj:`tensor`): The auxiliary data (a subset of the full data in a HistFactory model)
            pars (:obj:`tensor`): The model parameters

        Returns:
            log pdf value: The log of the pdf value of the Poisson constraints
        """
        tensorlib, _ = get_backend()
        pdf = self.make_pdf(pars)
        if pdf is None:
            return (tensorlib.zeros(self.batch_size)
                    if self.batch_size is not None
                    else tensorlib.astensor(0.0)[0])
        poisson_data = tensorlib.gather(auxdata, self.poisson_data)
        return pdf.log_prob(poisson_data)
Example 27
    def expected_data(self, pars, include_auxdata=True):
        """
        Compute the expected value of the main model, optionally including the
        auxiliary data.

        Args:
            pars (:obj:`tensor`): The parameter values
            include_auxdata (:obj:`bool`): Whether to include auxiliary data

        Returns:
            Tensor: The expected data of the main and auxiliary model

        """
        tensorlib, _ = get_backend()
        pars = tensorlib.astensor(pars)
        if not include_auxdata:
            return self.make_pdf(pars)[0].expected_data()
        return self.make_pdf(pars).expected_data()
Example 28
 def _precompute(self):
     tensorlib, _ = get_backend()
     self.deltas_up = tensorlib.astensor(self._deltas_up)
     self.deltas_dn = tensorlib.astensor(self._deltas_dn)
     self.broadcast_helper = tensorlib.astensor(self._broadcast_helper)
     self.alpha0 = tensorlib.astensor(self._alpha0)
     self.coefficients = tensorlib.astensor(self._coefficients)
     self.bases_up = tensorlib.einsum('sa,shb->shab',
                                      tensorlib.ones(self.alphasets_shape),
                                      self.deltas_up)
     self.bases_dn = tensorlib.einsum('sa,shb->shab',
                                      tensorlib.ones(self.alphasets_shape),
                                      self.deltas_dn)
     self.mask_on = tensorlib.ones(self.alphasets_shape)
     self.mask_off = tensorlib.zeros(self.alphasets_shape)
     self.ones = tensorlib.einsum('sa,shb->shab', self.mask_on,
                                  self.broadcast_helper)
Example 29
def extract_index_access(baseviewer, subviewer, indices):
    tensorlib, _ = get_backend()

    index_selection = []
    stitched = None
    indices_concatenated = None
    if subviewer:
        index_selection = baseviewer.split(indices, selection=subviewer.names)
        stitched = subviewer.stitch(index_selection)

        # the transpose is here so that modifier code doesn't have to do it
        indices_concatenated = tensorlib.astensor(
            tensorlib.einsum('ij->ji', stitched)
            if len(tensorlib.shape(stitched)) > 1 else stitched,
            dtype='int',
        )
    return index_selection, stitched, indices_concatenated
Example 30
    def _internal_postprocess(self, fitresult, stitch_pars, return_uncertainties=False):
        """
        Post-process the fit result.

        Returns:
            fitresult (scipy.optimize.OptimizeResult): A modified version of the fit result.
        """
        tensorlib, _ = get_backend()

        # stitch in missing parameters (e.g. fixed parameters)
        fitted_pars = stitch_pars(tensorlib.astensor(fitresult.x))

        # check if uncertainties were provided (and stitch just in case)
        uncertainties = getattr(fitresult, 'unc', None)
        if uncertainties is not None:
            # extract number of fixed parameters
            num_fixed_pars = len(fitted_pars) - len(fitresult.x)
            # stitch in zero-uncertainty for fixed values
            uncertainties = stitch_pars(
                tensorlib.astensor(uncertainties),
                stitch_with=tensorlib.zeros(num_fixed_pars),
            )
            if return_uncertainties:
                fitted_pars = tensorlib.stack([fitted_pars, uncertainties], axis=1)

        correlations = getattr(fitresult, 'corr', None)
        if correlations is not None:
            _zeros = tensorlib.zeros(num_fixed_pars)
            # possibly a more elegant way to do this
            stitched_columns = [
                stitch_pars(tensorlib.astensor(column), stitch_with=_zeros)
                for column in zip(*correlations)
            ]
            stitched_rows = [
                stitch_pars(tensorlib.astensor(row), stitch_with=_zeros)
                for row in zip(*stitched_columns)
            ]
            correlations = tensorlib.stack(stitched_rows, axis=1)

        fitresult.x = fitted_pars
        fitresult.fun = tensorlib.astensor(fitresult.fun)
        fitresult.unc = uncertainties
        fitresult.corr = correlations

        return fitresult
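The correlation matrix returned by the fit only covers floating parameters; the loop above stitches zero-filled rows and columns back in for the fixed ones, columns first and then rows. A numpy sketch with a made-up stitch_pars that inserts a single fixed slot at index 1:

    import numpy as np

    corr_free = np.array([[1.0, 0.3],
                          [0.3, 1.0]])   # 2 floating parameters
    fixed_index = 1                      # hypothetical fixed parameter slot

    def stitch_pars(values, stitch_with):
        return np.insert(values, fixed_index, stitch_with)

    _zeros = np.zeros(1)
    cols = [stitch_pars(np.asarray(c), _zeros) for c in zip(*corr_free)]
    rows = [stitch_pars(np.asarray(r), _zeros) for r in zip(*cols)]
    print(np.stack(rows, axis=1))
    # [[1.  0.  0.3]
    #  [0.  0.  0. ]
    #  [0.3 0.  1. ]]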