Example #1
def calculate_normalised_xsection(inputs,
                                  bin_widths,
                                  normalise_to_one=False,
                                  covariance_matrix=None,
                                  inputMC_covariance_matrix=None):
    """
        Calculates normalised average x-section for each bin: 1/N *1/bin_width sigma_i
        There are two ways to calculate this
            1) N = sum(sigma_i)
            2) N = sum(sigma_i/bin_width)
        The latter one will normalise the total distribution to 1
        @param inputs: list of value-error pairs
        @param bin_widths: bin widths of the inputs
    """
    values = [u.ufloat(i[0], i[1]) for i in inputs]

    norm_cov_matrix = None
    norm_corr_matrix = None
    inputMC_norm_cov_matrix = None
    if covariance_matrix is not None and not normalise_to_one:
        values_correlated = u.correlated_values(
            [v.nominal_value for v in values], covariance_matrix.tolist())
        norm = sum(values_correlated)
        norm_values_correlated = []
        # Loop over unfolded number of events with correlated uncertainties
        # And corresponding bin width
        # Calculate normalised cross section, with correctly correlated uncertainty
        for v, width in zip(values_correlated, bin_widths):
            norm_values_correlated.append(v / width / norm)
        # Get covariance and correlation matrix for normalised cross section
        norm_cov_matrix = matrix(u.covariance_matrix(norm_values_correlated))
        norm_corr_matrix = matrix(u.correlation_matrix(norm_values_correlated))
        result = [(v.nominal_value, v.std_dev) for v in norm_values_correlated]
        # Get Covariance Matrix for input MC
        if inputMC_covariance_matrix is not None:
            inputMC_values_correlated = u.correlated_values(
                [v.nominal_value for v in values],
                inputMC_covariance_matrix.tolist())
            inputMC_norm = sum(inputMC_values_correlated)
            inputMC_norm_values_correlated = []
            for v, width in zip(inputMC_values_correlated, bin_widths):
                inputMC_norm_values_correlated.append(v / width / inputMC_norm)
            inputMC_norm_cov_matrix = matrix(
                u.covariance_matrix(inputMC_norm_values_correlated))
    else:
        normalisation = 0
        if normalise_to_one:
            normalisation = sum([
                value / bin_width
                for value, bin_width in zip(values, bin_widths)
            ])
        else:
            normalisation = sum(values)
        xsections = [(1 / bin_width) * value / normalisation
                     for value, bin_width in zip(values, bin_widths)]
        result = [(xsection.nominal_value, xsection.std_dev)
                  for xsection in xsections]
    return result, norm_cov_matrix, norm_corr_matrix, inputMC_norm_cov_matrix
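A quick usage sketch for the function above, with hypothetical toy numbers (it assumes the module-level imports the function relies on, reproduced here):

import numpy as np
import uncertainties as u
from numpy import matrix

inputs = [(100.0, 10.0), (80.0, 9.0), (60.0, 8.0)]  # (value, error) pairs
bin_widths = [1.0, 2.0, 2.0]
cov = np.diag([10.0**2, 9.0**2, 8.0**2])  # uncorrelated toy covariance
result, norm_cov, norm_corr, _ = calculate_normalised_xsection(
    inputs, bin_widths, covariance_matrix=cov)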
Example #2
    def test_correlated_values():
        "Correlated variables."

        u = uncertainties.ufloat((1, 0.1))
        cov = uncertainties.covariance_matrix([u])
        # "1" is used instead of u.nominal_value because
        # u.nominal_value might return a float.  The idea is to force
        # the new variable u2 to be defined through an integer nominal
        # value:
        u2, = uncertainties.correlated_values([1], cov)
        expr = 2 * u2  # Calculations with u2 should be possible, like with u

        ####################

        # Covariances between output and input variables:

        x = ufloat((1, 0.1))
        y = ufloat((2, 0.3))
        z = -3 * x + y

        covs = uncertainties.covariance_matrix([x, y, z])

        # "Inversion" of the covariance matrix: creation of new
        # variables:
        (x_new, y_new, z_new) = uncertainties.correlated_values(
            [x.nominal_value, y.nominal_value, z.nominal_value], covs, tags=["x", "y", "z"]
        )

        # Even the uncertainties should be correctly reconstructed:
        assert matrices_close(numpy.array((x, y, z)), numpy.array((x_new, y_new, z_new)))

        # ... and the covariances too:
        assert matrices_close(numpy.array(covs), numpy.array(uncertainties.covariance_matrix([x_new, y_new, z_new])))

        assert matrices_close(numpy.array([z_new]), numpy.array([-3 * x_new + y_new]))

        ####################

        # ... as well as functional relations:

        u = ufloat((1, 0.05))
        v = ufloat((10, 0.1))
        sum_value = u + 2 * v

        # Covariance matrices:
        cov_matrix = uncertainties.covariance_matrix([u, v, sum_value])

        # Correlated variables can be constructed from a covariance matrix, if
        # NumPy is available:
        (u2, v2, sum2) = uncertainties.correlated_values([x.nominal_value for x in [u, v, sum_value]], cov_matrix)

        # matrices_close() is used instead of _numbers_close() because
        # it compares uncertainties too:
        assert matrices_close(numpy.array([0]), numpy.array([sum2 - (u2 + 2 * v2)]))
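The round trip exercised in this test, as a self-contained sketch using the current uncertainties API (ufloat(nominal, std_dev) instead of the legacy tuple form used above):

import uncertainties
from uncertainties import ufloat

x = ufloat(1, 0.1)
y = ufloat(2, 0.3)
z = -3 * x + y  # z is fully correlated with x and y
cov = uncertainties.covariance_matrix([x, y, z])
x2, y2, z2 = uncertainties.correlated_values(
    [x.nominal_value, y.nominal_value, z.nominal_value], cov)
# The linear relation survives the round trip:
assert abs((z2 - (-3 * x2 + y2)).std_dev) < 1e-8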
Example #3
def calculate_xsection(inputs,
                       bin_widths,
                       luminosity,
                       efficiency=1.,
                       covariance_matrix=None,
                       inputMC_covariance_matrix=None):
    '''
    BUG: this doesn't work unless the inputs are unfolded!
    inputs = list of (value, error) pairs
    luminosity = integrated luminosity of the measurement
    '''
    abs_cov_matrix = None
    abs_corr_matrix = None
    inputMC_abs_cov_matrix = None
    result = []
    add_result = result.append

    values = [u.ufloat(i[0], i[1]) for i in inputs]

    if covariance_matrix is not None:
        values_correlated = u.correlated_values(
            [v.nominal_value for v in values], covariance_matrix.tolist())
        abs_values_correlated = []
        # Loop over unfolded number of events with correlated uncertainties
        # And corresponding bin width
        # Calculate absolute cross section, with correctly correlated uncertainty
        for v, width in zip(values_correlated, bin_widths):
            abs_values_correlated.append(v / width / luminosity / efficiency)
        # Get covariance and correlation matrix for absolute cross section
        abs_cov_matrix = matrix(u.covariance_matrix(abs_values_correlated))
        abs_corr_matrix = matrix(u.correlation_matrix(abs_values_correlated))
        result = [(v.nominal_value, v.std_dev) for v in abs_values_correlated]
        # Get Covariance Matrix for input MC
        if inputMC_covariance_matrix is not None:
            inputMC_values_correlated = u.correlated_values(
                [v.nominal_value for v in values],
                inputMC_covariance_matrix.tolist())
            inputMC_abs_values_correlated = []
            for v, width in zip(inputMC_values_correlated, bin_widths):
                inputMC_abs_values_correlated.append(v / width / luminosity /
                                                     efficiency)
            inputMC_abs_cov_matrix = matrix(
                u.covariance_matrix(inputMC_abs_values_correlated))
    else:
        for valueAndErrors, binWidth in zip(inputs, bin_widths):
            value = valueAndErrors[0]
            error = valueAndErrors[1]
            xsection = value / (luminosity * efficiency * binWidth)
            xsection_error = error / (luminosity * efficiency * binWidth)
            add_result((xsection, xsection_error))
    return result, abs_cov_matrix, abs_corr_matrix, inputMC_abs_cov_matrix
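A hypothetical usage sketch for calculate_xsection (toy numbers; same module-level imports as Example #1):

import numpy as np

inputs = [(1000.0, 40.0), (600.0, 30.0)]  # unfolded counts with errors
bin_widths = [0.5, 1.0]
cov = np.diag([40.0**2, 30.0**2])
xsec, abs_cov, abs_corr, _ = calculate_xsection(
    inputs, bin_widths, luminosity=2000.0, efficiency=0.9,
    covariance_matrix=cov)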
Example #4
def test_copy():
    "Standard copy module integration"
    import gc

    x = ufloat((3, 0.1))
    assert x == x

    y = copy.copy(x)
    assert x != y
    assert not (x == y)
    assert y in list(
        y.derivatives.keys())  # y must not copy the dependence on x

    z = copy.deepcopy(x)
    assert x != z

    # Copy tests on expressions:
    t = x + 2 * z
    # t depends on x:
    assert x in t.derivatives

    # The relationship between the copy of an expression and the
    # original variables should be preserved:
    t_copy = copy.copy(t)
    # Shallow copy: the variables on which t depends are not copied:
    assert x in t_copy.derivatives
    assert (uncertainties.covariance_matrix(
        [t, z]) == uncertainties.covariance_matrix([t_copy, z]))

    # However, the relationship between a deep copy and the original
    # variables should be broken, since the deep copy created new,
    # independent variables:
    t_deepcopy = copy.deepcopy(t)
    assert x not in t_deepcopy.derivatives
    assert (uncertainties.covariance_matrix([t, z]) !=
            uncertainties.covariance_matrix([t_deepcopy, z]))

    # Test of implementations with weak references:

    # Weak references: destroying a variable should never destroy the
    # integrity of its copies (which would happen if the copy keeps a
    # weak reference to the original, in its derivatives member: the
    # weak reference to the original would become invalid):
    del x

    gc.collect()

    assert y in list(y.derivatives.keys())
Example #5
def calculation_Q_conventional(K, a):
    if not kontrola_rozmeru(K, a):  # kontrola_rozmeru: shape/dimension check
        return False, False, False
    Q = -np.dot(K, a)
    Q_covarianceMatrix = covariance_matrix(Q)
    Q_correlationMatrix = correlation_matrix(Q)
    return Q, Q_covarianceMatrix, Q_correlationMatrix
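What this relies on: np.dot of a plain coefficient matrix K with an array of ufloats yields ufloats that remember their correlations, so the covariance and correlation matrices of Q are well defined. A self-contained sketch with hypothetical numbers (the project's own kontrola_rozmeru shape check is skipped here):

import numpy as np
from uncertainties import ufloat, covariance_matrix, correlation_matrix

K = np.array([[1.0, -1.0], [0.5, 0.5]])
a = np.array([ufloat(10.0, 0.5), ufloat(20.0, 1.0)])  # concentrations
Q = -np.dot(K, a)  # array of correlated ufloats
print(covariance_matrix(Q))
print(correlation_matrix(Q))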
Example #6
    def test_correlated_values_correlation_mat():
        '''
        Tests the input of correlated value.

        Test through their correlation matrix (instead of the
        covariance matrix).
        '''

        x = ufloat((1, 0.1))
        y = ufloat((2, 0.3))
        z = -3 * x + y

        cov_mat = uncertainties.covariance_matrix([x, y, z])

        std_devs = numpy.sqrt(numpy.array(cov_mat).diagonal())

        corr_mat = cov_mat / std_devs / std_devs[numpy.newaxis].T

        # We make sure that the correlation matrix is indeed symmetric:
        assert (corr_mat - corr_mat.T).max() <= 1e-15
        # We make sure that there are indeed ones on the diagonal:
        assert (corr_mat.diagonal() - 1).max() <= 1e-15

        # We try to recover the correlated variables through the
        # correlation matrix (not through the covariance matrix):

        nominal_values = [v.nominal_value for v in (x, y, z)]
        std_devs = [v.std_dev() for v in (x, y, z)]
        x2, y2, z2 = uncertainties.correlated_values_norm(
            list(zip(nominal_values, std_devs)), corr_mat)

        # matrices_close() is used instead of _numbers_close() because
        # it compares uncertainties too:

        # Test of individual variables:
        assert matrices_close(numpy.array([x]), numpy.array([x2]))
        assert matrices_close(numpy.array([y]), numpy.array([y2]))
        assert matrices_close(numpy.array([z]), numpy.array([z2]))

        # Partial correlation test:
        assert matrices_close(numpy.array([0]),
                              numpy.array([z2 - (-3 * x2 + y2)]))

        # Test of the full covariance matrix:
        assert matrices_close(
            numpy.array(cov_mat),
            numpy.array(uncertainties.covariance_matrix([x2, y2, z2])))
Example #7
def test_copy():
    "Standard copy module integration"
    import gc
    
    x = ufloat((3, 0.1))
    assert x == x
    
    y = copy.copy(x)
    assert x != y
    assert not(x == y)
    assert y in y.derivatives.keys()  # y must not copy the dependence on x
    
    z = copy.deepcopy(x)
    assert x != z

    # Copy tests on expressions:
    t = x + 2*z
    # t depends on x:
    assert x in t.derivatives
    
    # The relationship between the copy of an expression and the
    # original variables should be preserved:
    t_copy = copy.copy(t)
    # Shallow copy: the variables on which t depends are not copied:
    assert x in t_copy.derivatives
    assert (uncertainties.covariance_matrix([t, z]) ==
            uncertainties.covariance_matrix([t_copy, z]))

    # However, the relationship between a deep copy and the original
    # variables should be broken, since the deep copy created new,
    # independent variables:
    t_deepcopy = copy.deepcopy(t)
    assert x not in t_deepcopy.derivatives    
    assert (uncertainties.covariance_matrix([t, z]) !=
            uncertainties.covariance_matrix([t_deepcopy, z]))

    # Test of implementations with weak references:

    # Weak references: destroying a variable should never destroy the
    # integrity of its copies (which would happen if the copy keeps a
    # weak reference to the original, in its derivatives member: the
    # weak reference to the original would become invalid):
    del x

    gc.collect()

    assert y in y.derivatives.keys()
Example #8
    def test_correlated_values_correlation_mat():
        '''
        Tests the input of correlated value.

        Test through their correlation matrix (instead of the
        covariance matrix).
        '''

        x = ufloat((1, 0.1))
        y = ufloat((2, 0.3))
        z = -3*x+y

        cov_mat = uncertainties.covariance_matrix([x, y, z])

        std_devs = numpy.sqrt(numpy.array(cov_mat).diagonal())

        corr_mat = cov_mat/std_devs/std_devs[numpy.newaxis].T

        # We make sure that the correlation matrix is indeed symmetric:
        assert (corr_mat-corr_mat.T).max() <= 1e-15
        # We make sure that there are indeed ones on the diagonal:
        assert (corr_mat.diagonal()-1).max() <= 1e-15

        # We try to recover the correlated variables through the
        # correlation matrix (not through the covariance matrix):

        nominal_values = [v.nominal_value for v in (x, y, z)]
        std_devs = [v.std_dev() for v in (x, y, z)]
        x2, y2, z2 = uncertainties.correlated_values_norm(
            list(zip(nominal_values, std_devs)), corr_mat)

        # matrices_close() is used instead of _numbers_close() because
        # it compares uncertainties too:

        # Test of individual variables:
        assert matrices_close(numpy.array([x]), numpy.array([x2]))
        assert matrices_close(numpy.array([y]), numpy.array([y2]))
        assert matrices_close(numpy.array([z]), numpy.array([z2]))

        # Partial correlation test:
        assert matrices_close(numpy.array([0]), numpy.array([z2-(-3*x2+y2)]))

        # Test of the full covariance matrix:
        assert matrices_close(
            numpy.array(cov_mat),
            numpy.array(uncertainties.covariance_matrix([x2, y2, z2])))
Example #9
def calculation_Q_conventional(K, a_out, a):
    # append the external concentration to the concentration vector
    a = np.append(a, ufloat(a_out, 0.05 * a_out))
    if not kontrola_rozmeru(K, a):
        return False, False, False
    Q = -np.dot(K, a)
    Q_covarianceMatrix = covariance_matrix(Q)
    Q_correlationMatrix = correlation_matrix(Q)
    return Q, Q_covarianceMatrix, Q_correlationMatrix
Example #10
def calculation_Q_conventional():
    K, a = prepare_for_calculation()
    if np.isscalar(K):
        print('Nelze provest vypocet.')  # "The calculation cannot be performed."
        return False, False, False  # bail out, as the sibling implementations do
    Q = -np.dot(K, a)
    Q_covarianceMatrix = covariance_matrix(Q)
    Q_correlationMatrix = correlation_matrix(Q)
    return Q, Q_covarianceMatrix, Q_correlationMatrix
Example #11
def linleastsquares(functions, x_values, y_values):
    y = unp.nominal_values(y_values)
    Z = np.linalg.inv(unc.covariance_matrix(y_values))

    A = np.column_stack([f(x_values) for f in functions])

    invATA = np.linalg.inv(A.T @ Z @ A)
    params = invATA @ A.T @ Z @ y

    cov = invATA

    return unc.correlated_values(params.flat, cov)
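A usage sketch for the ndarray version above, fitting a straight line y = 2x + 1 to hypothetical data with independent errors:

import numpy as np
import uncertainties as unc
from uncertainties import unumpy as unp

x_values = np.array([0.0, 1.0, 2.0, 3.0])
y_values = unp.uarray(2 * x_values + 1, 0.1)  # independent 0.1 errors
slope, intercept = linleastsquares(
    [lambda x: x, lambda x: np.ones_like(x)], x_values, y_values)
print(slope, intercept)  # correlated ufloats, close to 2 and 1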
Example #12
def test_covariances():
    "Covariance matrix"

    x = ufloat((1, 0.1))
    y = -2 * x + 10
    z = -3 * x
    covs = uncertainties.covariance_matrix([x, y, z])
    # Diagonal elements are simple:
    assert _numbers_close(covs[0][0], 0.01)
    assert _numbers_close(covs[1][1], 0.04)
    assert _numbers_close(covs[2][2], 0.09)
    # Non-diagonal elements:
    assert _numbers_close(covs[0][1], -0.02)
Example #13
def test_covariances():
    "Covariance matrix"

    x = ufloat((1, 0.1))
    y = -2*x+10
    z = -3*x
    covs = uncertainties.covariance_matrix([x, y, z])
    # Diagonal elements are simple:
    assert _numbers_close(covs[0][0], 0.01)
    assert _numbers_close(covs[1][1], 0.04)
    assert _numbers_close(covs[2][2], 0.09)
    # Non-diagonal elements:
    assert _numbers_close(covs[0][1], -0.02)
Example #14
def linleastsquares(functionlist, x_values, y_values):
    y = np.matrix(unp.nominal_values(y_values)).T
    Z = np.matrix(unc.covariance_matrix(y_values)).I

    # N rows for every value, p columns for every function
    dim = (len(x_values), len(functionlist))

    A = np.matrix(np.zeros(dim))

    for i, func in enumerate(functionlist):
        A[:, i] = func(x_values)[:, np.newaxis]

    invATA = (A.T * Z * A).I
    params = invATA * A.T * Z * y

    cov = invATA

    return (np.array(unc.correlated_values(params.flat, np.array(cov))))
Example #15
def linleastsquares(functionlist, x_values, y_values):
    y = np.matrix(unp.nominal_values(y_values)).T
    Z = np.matrix(unc.covariance_matrix(y_values)).I

    # N rows for every value, p columns for every function
    dim = (len(x_values), len(functionlist))

    A = np.matrix(np.zeros(dim))

    for i, func in enumerate(functionlist):
        A[:, i] = func(x_values)[:, np.newaxis]

    invATA = (A.T * Z * A).I
    params = invATA * A.T * Z * y

    cov = invATA

    return (np.array(unc.correlated_values(params.flat, np.array(cov))))
Example #16
File: detector.py Project: vsilv/fp
def calibration(detector_name, mu, s_mu):
    """
    does a linear fit fit three points !!!
    """
    def lin_func(x, a, b):
        return(a*x + b)
    fit_, cov_fit_ = curve_fit(lin_func, Es, mu, p0=None, sigma=s_mu)
    fit_corr_ = uc.correlated_values(fit_, cov_fit_)
    fit_corr  = np.array([1 / fit_corr_[0], - fit_corr_[1] / fit_corr_[0]])
    fit = un.nominal_values(fit_corr) 
    cov_fit = uc.covariance_matrix(fit_corr)
    s_fit = un.std_devs(fit_corr) 
    fit_r, s_fit_r = err_round(fit, cov_fit)
    fit_both = np.array([fit_r, s_fit_r])
    fit_both = np.reshape(fit_both.T, np.size(fit_both))

    fig1, ax1 = plt.subplots(1, 1)
    if not save_fig:
        fig1.suptitle("Calibration: " + detector_name)
    #plot1, = ax1.plot(mu, Es, '.', alpha=0.9)   
    mu_grid = np.linspace(0, 700, 100)
    plot_fit, = ax1.plot(mu_grid, lin_func(mu_grid, *fit), alpha=0.5)
    ax1.errorbar(mu, Es, xerr=s_mu, fmt='.', alpha=0.99, c=plot_fit.get_color()) # errors of t are not changed!
    ax1.set_xlabel("channel")
    ax1.set_ylabel("Energy / keV")
    textstr = 'Results of linear fit for %s'%detector_name + ":\n"
    textstr += '\\begin{eqnarray*}\
            E(\mu)&=& a \mu + E_0 \\\\ \
            a     &=& (%.4f \pm %.4f)\, \mathrm{keV / ch} \\\\ \
            E_0   &=& (%.1f \pm %.1f)\, \mathrm{keV} \\\\ \
            \end{eqnarray*}'%tuple(fit_both)
    ax1.text(0.1, 0.95, textstr, transform=ax1.transAxes, va='top', bbox=props)
    if show_fig:
        fig1.show()
    if save_fig:
        file_name = "detector_calibration_" + detector_name
        fig1.savefig(fig_dir + file_name + ".pdf")
        fig1.savefig(fig_dir + file_name + ".png")
    return fit_corr[0]
Example #17
def ucurve_fit(f, xdata, ydata, **kwargs):
    """
    Wrapper for curve_fit that allows use of uncertainties both in model and data.
    It returns a tuple o ufloat for parametres, correlated.
    """

    x, ux = unpack_unarray(xdata)
    y, uy = unpack_unarray(ydata)

    uy = unc.covariance_matrix(
        ydata
    )  # I use the covariance matrix as sigma, to fit correlated values

    @wraps(f)  # needed to pass the number of parameters; without it, curve_fit fails
    def model(x, *pars):
        # if you give to nominal_values a standard  numpy.array it returns it
        return unumpy.nominal_values(f(x, *pars))

    p, cov = curve_fit(f=model, xdata=x, ydata=y, sigma=uy, **kwargs)

    return unc.correlated_values(p, cov)
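The core pattern above, reduced to a self-contained sketch: run scipy's curve_fit, then turn (popt, pcov) into correlated ufloats (hypothetical linear model and data):

import numpy as np
import uncertainties as unc
from scipy.optimize import curve_fit

def line(x, a, b):
    return a * x + b

xdata = np.array([0.0, 1.0, 2.0, 3.0])
ydata = np.array([1.1, 2.9, 5.2, 6.8])
p, cov = curve_fit(line, xdata, ydata)
a, b = unc.correlated_values(p, cov)  # slope and intercept, with correlated errors
print(a, b)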
Example #18
def weighted_mean(y, covy=None):
    """
    Weighted mean (with covariance matrix).
    
    Parameters
    ----------
    y : array of numbers or ufloats
    covy : None or matrix
        If covy is None, y must be an array of ufloats.
    
    Returns
    -------
    a : ufloat
        Weighted mean of y.
    Q : float
        Chisquare (the value of the minimized quadratic form at the minimum).
    """
    # get covariance matrix
    if covy is None:
        covy = un.covariance_matrix(y)
    else:
        y = un.correlated_values(y, covy)
    
    # compute weighted average
    inv_covy = np.linalg.inv(covy)
    vara = 1 / np.sum(inv_covy)
    a = vara * np.sum(np.dot(inv_covy, y))
    
    # check computation of uncertainties module against direct computation
    assert np.allclose(vara, a.s ** 2)
    
    # compute chisquare
    res = unp.nominal_values(y) - a.n
    Q = float(res.reshape(1,-1) @ inv_covy @ res.reshape(-1,1))
    
    return a, Q
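A usage sketch: the weighted mean of two correlated measurements, built from a hypothetical covariance matrix with correlated_values:

import numpy as np
import uncertainties as un
from uncertainties import unumpy as unp

covy = np.array([[0.04, 0.01],
                 [0.01, 0.09]])
y = un.correlated_values([10.1, 9.8], covy)
a, Q = weighted_mean(y)  # covy is recovered internally via covariance_matrix
print(a, Q)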
Example #19
def formatcov(x, cov=None, labels=None, corrfmt='.0f'):
    """
    Format an estimate with a covariance matrix as an upper triangular matrix
    with values on the diagonal (with uncertainties) and correlations
    off-diagonal.
    
    Parameters
    ----------
    x : M-length array
        Values to be written on the diagonal.
    cov : (M, M) matrix or None
        Covariance matrix from which uncertainties and correlations are
        computed. If None, a covariance matrix is extracted from x with
        uncertainties.covariance_matrix(x).
    labels : list of strings
        Labels for the header of the matrix. If there are fewer than M labels,
        only the first elements are given labels.
    corrfmt : str
        Format for the correlations.
    
    Returns
    -------
    matrix : TextMatrix
        A TextMatrix instance. Can be converted to a string with str(). Has a
        method latex() to format as a LaTeX table.
    
    See also
    --------
    TextMatrix
    
    Examples
    --------
    >>> popt, pcov = scipy.optimize.curve_fit(f, x, y, ...)
    >>> print(formatcov(popt, pcov))
    >>> print(formatcov(popt, pcov).latex()) # LaTeX table
    """
    if cov is None:
        cov = uncertainties.covariance_matrix(x)
        x = unumpy.nominal_values(x)

    if isinstance(x, dict) and isinstance(cov, dict):
        keys = list(x.keys())
        x = [x[key] for key in keys]
        cov = [[float(cov[keyi, keyj]) for keyj in keys] for keyi in keys]
        if labels is None:
            labels = keys

    pars = _uformat_vect(x, np.sqrt(np.diag(cov)))
    corr = normcov(cov) * 100

    matrix = []
    if labels is not None:
        if len(labels) < len(x):
            labels = list(labels) + [''] * (len(x) - len(labels))
        elif len(labels) > len(x):
            labels = labels[:len(x)]
        matrix.append(labels)

    for i in range(len(corr)):
        matrix.append([pars[i]])
        for j in range(i + 1, len(corr)):
            c = corr[i, j]
            cs = ('{:' + corrfmt +
                  '} %').format(c) if math.isfinite(c) else str(c)
            matrix[-1].append(cs)

    return TextMatrix(matrix, fill_side='left')
Example #20
    def calculate(self, dataframe, ibin):

        # >>> acceptance selection
        var0 = [True,]*len(dataframe)
        if self.delR:
            var0 = var0 & (dataframe['delR'] > self.delR)
        if self.delDxy:
            var0 = var0 & (dataframe['delDxy'] < self.delDxy)
        if self.delDz:
            var0 = var0 & (dataframe['delDz'] < self.delDz)

        if all(var0):
            df = dataframe.copy()
        else:
            df = dataframe.loc[var0].copy()
        # <<< acceptance selection

        # >>> columns for tnp efficiency
        df['pass_muon'] = df['muon_ID'] >= self.ID
        df['pass_antiMuon'] = df['antiMuon_ID'] >= self.ID
        if self.pfIso:
            df['pass_muon'] = df['pass_muon'] & (df['muon_pfIso'] < self.pfIso)
            df['pass_antiMuon'] = df['pass_antiMuon'] & (df['antiMuon_pfIso'] < self.pfIso)

        if self.tkIso:
            df['pass_muon'] = df['pass_muon'] & (df['muon_tkIso'] < self.tkIso)
            df['pass_antiMuon'] = df['pass_antiMuon'] & (df['antiMuon_tkIso'] < self.tkIso)

        df['pass_muon_antiMuon'] = df['pass_muon'] & df['pass_antiMuon']

        nMinus = df['pass_muon'].sum()
        nPlus = df['pass_antiMuon'].sum()
        nPlusMinus = df['pass_muon_antiMuon'].sum()

        if self.HLT:
            df['pass_muon_HLT'] = df['pass_muon_antiMuon'] & df['muon_hlt_{0}'.format(self.HLT)]
            df['pass_antiMuon_HLT'] = df['pass_muon_antiMuon'] & df['antiMuon_hlt_{0}'.format(self.HLT)]
            df['pass_muon_antiMuon_HLT'] = df['pass_muon_HLT'] & df['pass_antiMuon_HLT']

            nMinus_HLT = df['pass_muon_HLT'].sum()
            nPlus_HLT = df['pass_antiMuon_HLT'].sum()
            nPlusMinus_HLT = df['pass_muon_antiMuon_HLT'].sum()
        # <<< columns for tnp efficiency

        # >>> columns for true efficiency
        if self.HLT:
            df['reco'] = df['pass_muon_antiMuon'] & (df['pass_muon_HLT'] | df['pass_antiMuon_HLT'])
        else:
            df['reco'] = df['pass_muon_antiMuon']

        df['not_reco'] = ~df['reco']

        nZReco = df['reco'].sum()
        nZNotReco = df['not_reco'].sum()
        # <<< columns for true efficiency

        # >>> construct full covariance matrix
        if self.HLT:
            corr_matrix = df[['reco','not_reco','pass_muon_antiMuon','pass_muon','pass_antiMuon','pass_muon_antiMuon_HLT','pass_muon_HLT','pass_antiMuon_HLT']].corr()

            nZReco, nZNotReco, nPlusMinus, nMinus, nPlus, nPlusMinus_HLT, nMinus_HLT, nPlus_HLT = unc.correlated_values_norm(
                [(nZReco, np.sqrt(nZReco)), (nZNotReco, np.sqrt(nZNotReco)),
                (nPlusMinus, np.sqrt(nPlusMinus)), (nMinus, np.sqrt(nMinus)), (nPlus, np.sqrt(nPlus)),
                (nPlusMinus_HLT, np.sqrt(nPlusMinus_HLT)), (nMinus_HLT, np.sqrt(nMinus_HLT)), (nPlus_HLT, np.sqrt(nPlus_HLT))],
                corr_matrix)

        else:
            corr_matrix = df[['reco','not_reco','pass_muon_antiMuon','pass_muon','pass_antiMuon']].corr()

            nZReco, nZNotReco, nPlusMinus, nMinus, nPlus = unc.correlated_values_norm(
                [(nZReco, np.sqrt(nZReco)), (nZNotReco, np.sqrt(nZNotReco)),
                (nPlusMinus, np.sqrt(nPlusMinus)), (nMinus, np.sqrt(nMinus)), (nPlus, np.sqrt(nPlus))],
                corr_matrix)
        # <<< construct full covariance matrix

        # >>> compute true and tnp efficiency
        eff_true = nZReco / (nZNotReco + nZReco)

        MuPlusEff = nPlusMinus/nMinus
        MuMinusEff = nPlusMinus/nPlus

        eff_tnp = MuPlusEff * MuMinusEff

        if self.HLT:
            MuPlusEff_HLT = nPlusMinus_HLT/nMinus_HLT
            MuMinusEff_HLT = nPlusMinus_HLT/nPlus_HLT

            eff_tnpZ = (1 - (1 - MuPlusEff_HLT) * (1 - MuMinusEff_HLT) ) * eff_tnp
        else:
            eff_tnpZ = eff_tnp
        # <<< compute true and tnp efficiency

        # >>> store
        self.eff_corr.append(unc.covariance_matrix([eff_tnpZ, eff_true]))
        self.eff_tnp.append(eff_tnp)
        self.eff_tnpZ.append(eff_tnpZ)
        self.eff_true.append(eff_true)
Example #21
#(3.1415+/-0)e+10
#>>> print ufloat(3.1415, 0.0005)
#3.1415+/-0.0005
#>>> print '{:.2f}'.format(ufloat(3.14, 0.001))
#3.14+/-0.00
#>>> print '{:.2f}'.format(ufloat(3.14, 0.00))
#3.14+/-0


u = ufloat(1, 0.1, "u variable")  # Tag
v = ufloat(10, 0.1, "v variable")

sum_value = u+2*v
print(sum_value)

cov_matrix = covariance_matrix([u, v, sum_value])
print(cov_matrix)

(u2, v2, sum2) = uncertainties.correlated_values([1, 10, 21], cov_matrix)
print(sum2)

for (var, error) in sum_value.error_components().items():
  print("{}: {}".format(var.tag, error))


output = subprocess.check_output(["echo", "Hello World!"])
print(subprocess.getoutput('ls /bin/ls'))
print(subprocess.getoutput('echo %s' %"Hello World!"))
print(b'hi'.decode('ascii'))
print(output)
Example #22
    def __init__(self,
                 dir_x,
                 dir_y,
                 dir_z,
                 unc_x,
                 unc_y,
                 unc_z,
                 num_samples=1000000,
                 random_seed=42,
                 weighted_normalization=True,
                 fix_delta=True):
        """Initialize DNN LLH object.

        Parameters
        ----------
        dir_x : float
            The best fit direction vector x component.
            This is the output of the DNN reco for the x-component.
        dir_y : float
            The best fit direction vector y component.
            This is the output of the DNN reco for the y-component.
        dir_z : float
            The best fit direction vector z component.
            This is the output of the DNN reco for the z-component.
        unc_x : float
            The estimated uncertainty for the direction vector x component.
            This is the output of the DNN reco for the estimated uncertainty.
        unc_y : float
            The estimated uncertainty for the direction vector y component.
            This is the output of the DNN reco for the estimated uncertainty.
        unc_z : float
            The estimated uncertainty for the direction vector z component.
            This is the output of the DNN reco for the estimated uncertainty.
        num_samples : int, optional
            Number of samples to sample for internal calculations.
            The more samples, the more accurate, but also slower.
        random_seed : int, optional
            Random seed for sampling.
        weighted_normalization : bool, optional
            If True the normalization vectors get normalized according to the
            uncertainty on each of its components.
            If False, the vectors get scaled by their norm to obtain unit
            vectors.
        fix_delta : bool, optional
            If True, the direction vectors will be sampled in a way such
            that the deltas of the angles, abs(azimuth - sampled_azimuth)
            and abs(zenith - sampled_zenith), follow the expected
            distribution.
        """
        self.weighted_normalization = weighted_normalization
        u_dir_x = ufloat(dir_x, unc_x)
        u_dir_y = ufloat(dir_y, unc_y)
        u_dir_z = ufloat(dir_z, unc_z)
        u_dir_x, u_dir_y, u_dir_z = self.u_normalize_dir(
            u_dir_x, u_dir_y, u_dir_z)

        # Assign values with propagated and normalized vector
        u_zenith, u_azimuth = self.u_get_zenith_azimuth(
            u_dir_x, u_dir_y, u_dir_z)
        self.dir_x = u_dir_x.nominal_value
        self.dir_y = u_dir_y.nominal_value
        self.dir_z = u_dir_z.nominal_value
        self.zenith = unumpy.nominal_values(u_zenith)
        self.azimuth = unumpy.nominal_values(u_azimuth)
        self.unc_zenith = unumpy.std_devs(u_zenith)
        self.unc_azimuth = unumpy.std_devs(u_azimuth)
        cov = np.array(covariance_matrix([u_zenith, u_azimuth]))
        DNN_LLH_Base_Elliptical.__init__(self, self.zenith, self.azimuth, cov,
                                         num_samples, random_seed,
                                         weighted_normalization, fix_delta)
Example #23
    def __init__(self,
                 dir_x,
                 dir_y,
                 dir_z,
                 unc_x,
                 unc_y,
                 unc_z,
                 propagate_errors=False,
                 num_samples=1000000,
                 random_seed=42,
                 scale_unc=True,
                 weighted_normalization=True,
                 fix_delta=True):
        """Initialize DNN LLH object.

        Parameters
        ----------
        dir_x : float
            The best fit direction vector x component.
            This is the output of the DNN reco for the x-component.
        dir_y : float
            The best fit direction vector y component.
            This is the output of the DNN reco for the y-component.
        dir_z : float
            The best fit direction vector z component.
            This is the output of the DNN reco for the z-component.
        unc_x : float
            The estimated uncertainty for the direction vector x component.
            This is the output of the DNN reco for the estimated uncertainty.
        unc_y : float
            The estimated uncertainty for the direction vector y component.
            This is the output of the DNN reco for the estimated uncertainty.
        unc_z : float
            The estimated uncertainty for the direction vector z component.
            This is the output of the DNN reco for the estimated uncertainty.
        propagate_errors : bool, optional
            Propagate errors and account for correlations.
        num_samples : int, optional
            Number of samples to sample for internal calculations.
            The more samples, the more accurate, but also slower.
        random_seed : int, optional
            Random seed for sampling.
        scale_unc : bool, optional
            Due to the normalization of the direction vectors, the components
            of the vector are correlated, hence the actual spread in sampled
            direction vectors shrinks. The NN model predicts the Gaussian
            likelihood of the normalized vectors (if normalization is included
            in the network model). In this case, the uncertainties of the
            direction vector components can be scaled to account for this
            correlation. If set to True, the uncertainties will be scaled.
        weighted_normalization : bool, optional
            If True the normalization vectors get normalized according to the
            uncertainty on each of its components.
            If False, the vectors get scaled by their norm to obtain unit
            vectors.
        fix_delta : bool, optional
            If True, the direction vectors will be sampled in a way such
            that the deltas abs(dir_i - sampled_dir_i) follow the expected
            distribution.
        """

        # call init from base class
        DNN_LLH_Base.__init__(self, dir_x, dir_y, dir_z, unc_x, unc_y, unc_z,
                              random_seed, weighted_normalization)

        self._num_samples = num_samples
        self.propagate_errors = propagate_errors
        self._fix_delta = fix_delta
        if self.propagate_errors:
            # propagate errors
            u_dir_x = unumpy.uarray(dir_x, unc_x)
            u_dir_y = unumpy.uarray(dir_y, unc_y)
            u_dir_z = unumpy.uarray(dir_z, unc_z)
            u_dir_x, u_dir_y, u_dir_z = self.u_normalize_dir(
                u_dir_x, u_dir_y, u_dir_z)

            # Assign values with propagated and normalized vector
            self.unc_x = u_dir_x.std_dev
            self.unc_y = u_dir_y.std_dev
            self.unc_z = u_dir_z.std_dev
            self.dir_x = u_dir_x.nominal_value
            self.dir_y = u_dir_y.nominal_value
            self.dir_z = u_dir_z.nominal_value
            self.cov_matrix = np.array(
                uncertainties.covariance_matrix([u_dir_x, u_dir_y, u_dir_z]))
            self.dist = multivariate_normal(mean=(self.dir_x, self.dir_y,
                                                  self.dir_z),
                                            cov=self.cov_matrix,
                                            allow_singular=True)
        else:
            self.unc_x = unc_x
            self.unc_y = unc_y
            self.unc_z = unc_z
            self.dir_x, self.dir_y, self.dir_z = \
                self.normalize_dir(dir_x, dir_y, dir_z)

        # -------------------------
        # scale up unc if necessary
        # -------------------------
        self.scale_unc = scale_unc

        if self.scale_unc:

            def _scale():
                dir_x_s, dir_y_s, dir_z_s = self.sample_dir(
                    min(self._num_samples, 1000))
                # print('scaling x by:', self.unc_x / np.std(dir_x_s))
                # print('scaling y by:', self.unc_y / np.std(dir_y_s))
                # print('scaling z by:', self.unc_z / np.std(dir_z_s))
                self.unc_x *= self.unc_x / np.std(dir_x_s)
                self.unc_y *= self.unc_y / np.std(dir_y_s)
                self.unc_z *= self.unc_z / np.std(dir_z_s)

            _scale()

        # -------------------------

        self.zenith, self.azimuth = self.get_zenith_azimuth(
            self.dir_x, self.dir_y, self.dir_z)
        # sample contours
        self.dir_x_s, self.dir_y_s, self.dir_z_s = \
            self.sample_dir(self._num_samples)

        self.neg_llh_values = -self.log_prob_dir(self.dir_x_s, self.dir_y_s,
                                                 self.dir_z_s)

        # sort sampled points according to neg llh
        sorted_indices = np.argsort(self.neg_llh_values)
        self.dir_x_s = self.dir_x_s[sorted_indices]
        self.dir_y_s = self.dir_y_s[sorted_indices]
        self.dir_z_s = self.dir_z_s[sorted_indices]
        self.neg_llh_values = self.neg_llh_values[sorted_indices]

        # get sampled zenith and azimuth
        self.zenith_s, self.azimuth_s = self.get_zenith_azimuth(
            self.dir_x_s, self.dir_y_s, self.dir_z_s)
Example #24
    def test_correlated_values():
        """
        Correlated variables.
        Test through the input of the (full) covariance matrix.
        """

        u = uncertainties.ufloat((1, 0.1))
        cov = uncertainties.covariance_matrix([u])
        # "1" is used instead of u.nominal_value because
        # u.nominal_value might return a float.  The idea is to force
        # the new variable u2 to be defined through an integer nominal
        # value:
        u2, = uncertainties.correlated_values([1], cov)
        expr = 2 * u2  # Calculations with u2 should be possible, like with u

        ####################

        # Covariances between output and input variables:

        x = ufloat((1, 0.1))
        y = ufloat((2, 0.3))
        z = -3 * x + y

        covs = uncertainties.covariance_matrix([x, y, z])

        # Test of the diagonal covariance elements:
        assert matrices_close(numpy.array([v.std_dev()**2 for v in (x, y, z)]),
                              numpy.array(covs).diagonal())

        # "Inversion" of the covariance matrix: creation of new
        # variables:
        (x_new, y_new, z_new) = uncertainties.correlated_values(
            [x.nominal_value, y.nominal_value, z.nominal_value],
            covs,
            tags=['x', 'y', 'z'])

        # Even the uncertainties should be correctly reconstructed:
        assert matrices_close(numpy.array((x, y, z)),
                              numpy.array((x_new, y_new, z_new)))

        # ... and the covariances too:
        assert matrices_close(
            numpy.array(covs),
            numpy.array(uncertainties.covariance_matrix([x_new, y_new,
                                                         z_new])))

        assert matrices_close(numpy.array([z_new]),
                              numpy.array([-3 * x_new + y_new]))

        ####################

        # ... as well as functional relations:

        u = ufloat((1, 0.05))
        v = ufloat((10, 0.1))
        sum_value = u + 2 * v

        # Covariance matrices:
        cov_matrix = uncertainties.covariance_matrix([u, v, sum_value])

        # Correlated variables can be constructed from a covariance
        # matrix, if NumPy is available:
        (u2, v2, sum2) = uncertainties.correlated_values(
            [x.nominal_value for x in [u, v, sum_value]], cov_matrix)

        # matrices_close() is used instead of _numbers_close() because
        # it compares uncertainties too:
        assert matrices_close(numpy.array([u]), numpy.array([u2]))
        assert matrices_close(numpy.array([v]), numpy.array([v2]))
        assert matrices_close(numpy.array([sum_value]), numpy.array([sum2]))
        assert matrices_close(numpy.array([0]),
                              numpy.array([sum2 - (u2 + 2 * v2)]))
Example #25
    def __init__(self, values, cov=None, stds=None, info=None, labels=None):
        """
        Store parameters and associated uncertainties.

        Parameters
        ----------
        values : array_like, dict or uncertainties object
            Either the nominal values of the parameters, a dictionary
            containing `values` and `stds` or `cov` used to create
            a parameter object, or values with associated uncertainties
            created by the `uncertainties` package.
        cov : array_like
            The covariance matrix for the specified parameters, used for
            propagating errors. Only used if `values` is array_like, in
            which case it should be an array of shape
            (len(values), len(values)).
        stds : array_like
            Standard deviations of the values. This does not account for
            correlated uncertainties - use `cov` for that. Only used if
            `values` is array_like, in which case `stds` should be the
            same length as `values`.
        info : str
            A description of the parameters, to be stored alongside the
            values.
        labels : array_like
            Parameter names.
        """
        if isinstance(values, dict):
            vd = values.copy()
            values = vd['values']
            if 'stds' in vd:
                stds = vd['stds']
            if 'cov' in vd:
                cov = vd['cov']
            if 'info' in vd:
                info = vd['info']
            if 'labels' in vd:
                labels = vd['labels']
        
        if info is None:
            info = 'No information given.'
        self.info = info        
        self.labels = labels

        self.param_name = ''

        if ucheck(values):
            self.values = values
        else:
            if cov is not None:
                self.values = un.correlated_values(values, cov)
            elif stds is not None:
                self.values = unp.uarray(values, stds)
            else:
                self.values = values

        if ucheck(self.values):
            self.nom_values = unp.nominal_values(self.values)
            self.stds = unp.std_devs(self.values)
            self.cov = un.covariance_matrix(self.values)
        else:
            self.nom_values = np.asanyarray(self.values)
            self.stds = np.full(len(self.values), np.nan)
            self.cov = np.full((len(self.values), len(self.values)), np.nan)

        self.wrap = TextWrapper(linewidth).wrap
Example #26
def test_monte_carlo_comparison():
    """
    Full comparison to a Monte-Carlo calculation.

    Both the nominal values and the covariances are compared between
    the direct calculation performed in this module and a Monte-Carlo
    simulation.
    """
    
    try:
        import numpy
        import numpy.random
    except ImportError:
        import warnings
        warnings.warn("Test not performed because NumPy is not available")
        return

    # Works on numpy.arrays of Variable objects (whereas umath.sin()
    # does not):
    sin_uarrayncert = numpy.vectorize(umath.sin, otypes=[object])
    
    # Example expression (with correlations, and multiple variables combined
    # in a non-linear way):
    def function(x, y):
        """
        Function that takes two NumPy arrays of the same size.
        """
        # The uncertainty due to x is about equal to the uncertainty
        # due to y:
        return 10 * x**2 - x * sin_uarrayncert(y**3)

    x = uncertainties.ufloat((0.2, 0.01))
    y = uncertainties.ufloat((10, 0.001))
    function_result_this_module = function(x, y)
    nominal_value_this_module = function_result_this_module.nominal_value

    # Covariances "f*f", "f*x", "f*y":
    covariances_this_module = numpy.array(uncertainties.covariance_matrix(
        (x, y, function_result_this_module)))

    def monte_carlo_calc(n_samples):
        """
        Calculate function(x, y) on n_samples samples and returns the
        median, and the covariances between (x, y, function(x, y)).
        """
        # Result of a Monte-Carlo simulation:
        x_samples = numpy.random.normal(x.nominal_value, x.std_dev(),
                                        n_samples)
        y_samples = numpy.random.normal(y.nominal_value, y.std_dev(),
                                        n_samples)
        function_samples = function(x_samples, y_samples)

        cov_mat = numpy.cov([x_samples, y_samples], function_samples)
        
        return (numpy.median(function_samples), cov_mat)
        
    (nominal_value_samples, covariances_samples) = monte_carlo_calc(1000000)


    ## Comparison between both results:

    # The covariance matrices must be close:

    # We rely on the fact that covariances_samples very rarely has
    # null elements:
    
    assert numpy.vectorize(test_uncertainties._numbers_close)(
        covariances_this_module,
        covariances_samples,
        0.05).all(), (
        "The covariance matrices do not coincide between"
        " the Monte-Carlo simulation and the direct calculation:\n"
        "* Monte-Carlo:\n%s\n* Direct calculation:\n%s"
        % (covariances_samples, covariances_this_module)
        )
    
    # The nominal values must be close:
    assert test_uncertainties._numbers_close(
        nominal_value_this_module,
        nominal_value_samples,
        # The scale of the comparison depends on the standard
        # deviation: the nominal values can differ by a fraction of
        # the standard deviation:
        math.sqrt(covariances_samples[2, 2])
        / abs(nominal_value_samples) * 0.5), (
        "The nominal value (%f) does not coincide with that of"
        " the Monte-Carlo simulation (%f), for a standard deviation of %f."
        % (nominal_value_this_module,
           nominal_value_samples,
           math.sqrt(covariances_samples[2, 2]))
        )
Example #27
def test_monte_carlo_comparison():
    """
    Full comparison to a Monte-Carlo calculation.

    Both the nominal values and the covariances are compared between
    the direct calculation performed in this module and a Monte-Carlo
    simulation.
    """

    try:
        import numpy
        import numpy.random
    except ImportError:
        import warnings
        warnings.warn("Test not performed because NumPy is not available")
        return

    # Works on numpy.arrays of Variable objects (whereas umath.sin()
    # does not):
    sin_uarray_uncert = numpy.vectorize(umath.sin, otypes=[object])

    # Example expression (with correlations, and multiple variables combined
    # in a non-linear way):
    def function(x, y):
        """
        Function that takes two NumPy arrays of the same size.
        """
        # The uncertainty due to x is about equal to the uncertainty
        # due to y:
        return 10 * x**2 - x * sin_uarray_uncert(y**3)

    x = ufloat(0.2, 0.01)
    y = ufloat(10, 0.001)
    function_result_this_module = function(x, y)
    nominal_value_this_module = function_result_this_module.nominal_value

    # Covariances "f*f", "f*x", "f*y":
    covariances_this_module = numpy.array(
        uncertainties.covariance_matrix((x, y, function_result_this_module)))

    def monte_carlo_calc(n_samples):
        """
        Calculate function(x, y) on n_samples samples and returns the
        median, and the covariances between (x, y, function(x, y)).
        """
        # Result of a Monte-Carlo simulation:
        x_samples = numpy.random.normal(x.nominal_value, x.std_dev, n_samples)
        y_samples = numpy.random.normal(y.nominal_value, y.std_dev, n_samples)

        # !!! astype() is a temporary fix for NumPy 1.8:
        function_samples = function(x_samples, y_samples).astype(float)

        cov_mat = numpy.cov([x_samples, y_samples], function_samples)

        return (numpy.median(function_samples), cov_mat)

    (nominal_value_samples, covariances_samples) = monte_carlo_calc(1000000)

    ## Comparison between both results:

    # The covariance matrices must be close:

    # We rely on the fact that covariances_samples very rarely has
    # null elements:

    assert numpy.vectorize(test_uncertainties.numbers_close)(
        covariances_this_module, covariances_samples, 0.05).all(), (
            "The covariance matrices do not coincide between"
            " the Monte-Carlo simulation and the direct calculation:\n"
            "* Monte-Carlo:\n%s\n* Direct calculation:\n%s" %
            (covariances_samples, covariances_this_module))

    # The nominal values must be close:
    assert test_uncertainties.numbers_close(
        nominal_value_this_module,
        nominal_value_samples,
        # The scale of the comparison depends on the standard
        # deviation: the nominal values can differ by a fraction of
        # the standard deviation:
        math.sqrt(covariances_samples[2, 2]) / abs(nominal_value_samples) *
        0.5), (
            "The nominal value (%f) does not coincide with that of"
            " the Monte-Carlo simulation (%f), for a standard deviation of %f."
            % (nominal_value_this_module, nominal_value_samples,
               math.sqrt(covariances_samples[2, 2])))
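The same cross-check in miniature, using the current API on a single variable (hypothetical numbers): linear error propagation should agree with Monte-Carlo sampling in the near-linear regime.

import numpy
import uncertainties
from uncertainties import umath

x = uncertainties.ufloat(0.2, 0.01)
f = 10 * x**2 - x * umath.sin(x**3)  # propagated linearly

samples = numpy.random.normal(x.nominal_value, x.std_dev, 100000)
f_samples = 10 * samples**2 - samples * numpy.sin(samples**3)
# Sampled spread matches the propagated standard deviation to a few percent:
assert abs(f_samples.std() - f.std_dev) / f.std_dev < 0.05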