Example #1
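Batch-wise evaluation of the differential rate: each batch is computed in TensorFlow, converted to NumPy with fd.tf_to_np, and the results are concatenated and trimmed to the actual number of events.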
    def batched_differential_rate(self, progress=True, **params):
        progress = (lambda x: x) if not progress else tqdm
        y = []
        for i_batch in progress(range(self.n_batches)):
            q = self.data_tensor[i_batch]
            y.append(
                fd.tf_to_np(self.differential_rate(data_tensor=q, **params)))

        return np.concatenate(y)[:self.n_events]
Example #2
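A more compact variant of the same batched_differential_rate, building the concatenated NumPy array in a single list comprehension.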
    def batched_differential_rate(self, progress=True, **params):
        progress = (lambda x: x) if not progress else tqdm
        y = np.concatenate([
            fd.tf_to_np(
                self.differential_rate(data_tensor=self.data_tensor[i_batch],
                                       **params))
            for i_batch in progress(range(self.n_batches))
        ])
        return y[:self.n_events]
Example #3
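A test checking that _differential_rate returns a tensor of the library's float type and that changing the elife parameter changes the result.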
def test_underscore_diff_rate(xes: fd.ERSource):
    # The differential rate should be a tensor of the library's float type
    x = xes._differential_rate(data_tensor=xes.data_tensor[0],
                               ptensor=xes.ptensor_from_kwargs())
    assert isinstance(x, tf.Tensor)
    assert x.dtype == fd.float_type()

    # Changing the elife parameter must change the result:
    # every entry of |x - y| has to be strictly positive
    y = xes._differential_rate(data_tensor=xes.data_tensor[0],
                               ptensor=xes.ptensor_from_kwargs(elife=100e3))
    np.testing.assert_array_less(-fd.tf_to_np(tf.abs(x - y)), 0)
Example #4
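gimme evaluates a model function by name, fetching the required data columns and parameters, and returns the result as tensors or as NumPy arrays depending on numpy_out.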
    def gimme(self,
              fname,
              data_tensor=None,
              ptensor=None,
              bonus_arg=None,
              numpy_out=False):
        """Evaluate the model function fname with all required arguments

        :param fname: Name of the model function to compute
        :param bonus_arg: If fname takes a bonus argument, the data for it
        :param numpy_out: If True, return (tuple of) numpy arrays,
        otherwise (tuple of) tensors.
        :param data_tensor: Data tensor, columns as self.name_id
        If not given, use self.data (used in annotate)
        :param ptensor: Parameter tensor, columns as self.param_id
        If not given, use defaults dictionary (used in annotate)
        Before using gimme, you must use set_data to
        populate the internal caches.
        """
        # TODO: make a clean way to keep track of i_batch or have it as input
        assert (bonus_arg is not None) == (fname in self.special_data_methods)

        if data_tensor is None:
            # We're in an annotate
            assert hasattr(self, 'data'), "You must set data first"
        else:
            # We're computing
            if not hasattr(self, 'name_id'):
                raise ValueError(
                    "You must set_data first (and populate the tensor cache)")

        f = getattr(self, fname)

        if callable(f):
            args = [self._fetch(x, data_tensor) for x in self.f_dims[fname]]
            if bonus_arg is not None:
                args = [bonus_arg] + args
            kwargs = {
                pname: self._fetch_param(pname, ptensor)
                for pname in self.f_params[fname]
            }
            res = f(*args, **kwargs)

        else:
            if bonus_arg is None:
                n = len(
                    self.data) if data_tensor is None else data_tensor.shape[0]
                x = tf.ones(n, dtype=fd.float_type())
            else:
                x = tf.ones_like(bonus_arg, dtype=fd.float_type())
            res = f * x

        if numpy_out:
            return fd.tf_to_np(res)
        return fd.np_to_tf(res)
Example #5
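Prints a fit summary: best-fit values with standard errors derived from the inverse Hessian (converted to NumPy with fd.tf_to_np), followed by the correlation matrix of the free parameters.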
    def summary(self, bestfit=None, fix=None, guess=None,
                inverse_hessian=None, precision=3):
        """Print summary information about best fit"""
        if fix is None:
            fix = dict()
        if bestfit is None:
            bestfit = self.bestfit(guess=guess, fix=fix)

        params = {**bestfit, **fix}
        if inverse_hessian is None:
            inverse_hessian = self.inverse_hessian(
                params,
                omit_grads=tuple(fix.keys()))
        inverse_hessian = fd.tf_to_np(inverse_hessian)

        stderr, cov = cov_to_std(inverse_hessian)

        var_par_i = 0
        for i, pname in enumerate(self.param_names):
            if pname in fix:
                print("{pname}: {x:.{precision}g} (fixed)".format(
                    pname=pname, x=fix[pname], precision=precision))
            else:
                template = "{pname}: {x:.{precision}g} +- {xerr:.{precision}g}"
                print(template.format(
                    pname=pname,
                    x=bestfit[pname],
                    xerr=stderr[var_par_i],
                    precision=precision))
                var_par_i += 1

        var_pars = [x for x in self.param_names if x not in fix]
        df = pd.DataFrame(
            {p1: {p2: cov[i1, i2]
                  for i2, p2 in enumerate(var_pars)}
             for i1, p1 in enumerate(var_pars)},
            columns=var_pars)

        # Get rows in the correct order
        df['index'] = [var_pars.index(x)
                       for x in df.index.values]
        df = df.sort_values(by='index')
        del df['index']

        print("Correlation matrix:")
        pd.set_option('precision', 3)
        print(df)
        pd.reset_option('precision')
Example #6
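Converts a TensorFlow Probability optimizer result into a dictionary of best-fit parameter values (as NumPy) plus the objective value, calling self.fail if the optimizer reported failure.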
    def parse_result(self, result):
        if result.failed:
            self.fail(f"TFP optimizer failed! Result: {result}")
        return (dict(zip(self.arg_names, fd.tf_to_np(result.position))),
                result.objective_value)
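
The common thread in these examples is moving data across the TensorFlow/NumPy boundary. Below is a minimal sketch of that round trip; it assumes only that fd.np_to_tf and fd.tf_to_np behave as they appear to in Example #4 (NumPy in, float tensor out, and back), and that the package is imported as flamedisx as fd.

import numpy as np
import tensorflow as tf
import flamedisx as fd

# Wrap a NumPy array as a TensorFlow tensor of the library's float type
x = fd.np_to_tf(np.linspace(0., 1., 5))
assert isinstance(x, tf.Tensor)

# Do some work in TensorFlow...
y = tf.sqrt(x)

# ...and bring the result back to NumPy for tests, plots, or concatenation
y_np = fd.tf_to_np(y)
assert isinstance(y_np, np.ndarray)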