Example #1
def test_bounds():
    r1 = minimize(func, (1.5, 1.7, 1.5),
                  bounds=opt.Bounds((1, 1.5, 1), (2, 2, 2)))
    assert r1.success
    assert_allclose(r1.x, (1, 1.5, 2), atol=1e-2)
    r2 = minimize(func, (1.5, 1.7, 1.5), bounds=((1, 2), (1.5, 2), (1, 2)))
    assert r2.success
    assert_equal(r1.x, r2.x)
Example #2
def test_tol():
    ref = np.ones(2)

    def rosen(par):
        x, y = par
        return (1 - x)**2 + 100 * (y - x**2)**2

    r1 = minimize(rosen, (0, 0), tol=1)
    r2 = minimize(rosen, (0, 0), tol=1e-6)

    assert max(np.abs(r2.x - ref)) < max(np.abs(r1.x - ref))
Example #3
def find_MAP(params, distribution, bounds, ntemp=20):

    x0 = np.empty(len(bounds))
    for i in range(len(bounds)):
        low = bounds[i][0]
        high = bounds[i][1]
        x0[i] = low + np.random.rand() * (high - low)

    idxs = np.arange(1, ntemp + 1)
    betas = 2**(0.5*(idxs-idxs[-1]))

    for beta in betas:
        #if cm.get_rank==0:
        #    print('beta = ', beta, flush=True)

        distribution.set_beta(beta)
        func = distribution.get_neglogposterior

        cnt = 0
        while True:
            result = minimize(func, x0, options={'maxfev' : 1000}, bounds=bounds)
            x0 = result.x
            cnt += result.nfev
            if result.message == 'Optimization terminated successfully.' or cnt > 20000:
                break

    return result.x, result.fun, result.hess_inv 
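
For reference, the tempering schedule above ramps beta geometrically from 2**(-(ntemp-1)/2) up to 1; a quick standalone check of that schedule (using the default ntemp=20):

import numpy as np

ntemp = 20
idxs = np.arange(1, ntemp + 1)
betas = 2**(0.5*(idxs - idxs[-1]))
print(betas[0], betas[-1])  # ~0.00138 ... 1.0
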
Example #4
def test_call_limit():
    ref = minimize(func, (1, 1, 1))
    with pytest.warns(UserWarning):
        r1 = minimize(func, (1, 1, 1), options={"maxiter": 1})
    assert r1.nfev < ref.nfev
    assert not r1.success
    assert "Call limit" in r1.message

    with pytest.warns(DeprecationWarning):
        r2 = minimize(func, (1, 1, 1), options={"maxfev": 1})
    assert not r2.success
    assert r2.nfev == r1.nfev

    r3 = minimize(func, (1, 1, 1), options={"maxfun": 1})
    assert not r3.success
    assert r3.nfev == r1.nfev
Example #5
def MassDiscoveryLimit_Minuit(m_vals,R1_tab,R0,m_DL_vals,gmin=1e-12,gmax=1e-7,ng=100):
    nm = size(m_vals)
    n_DL = size(m_DL_vals)
    DL = zeros(shape=(n_DL))
    g_vals = logspace(log10(gmin),log10(gmax),ng)
    for im in range(0,n_DL):
        # carry the previous (D12, g) across the coupling scan for interpolation
        D12_prev = 0.0
        g_prev = g_vals[0]
        for j in range(0,ng):
            g = g_vals[j]
            m0 = m_DL_vals[im]
            N_obs = InterpExpectedEvents(g,m0,m_vals,R1_tab)

            #print log10(g),log10(m0),sum(N_obs)
            if sum(N_obs)>3:
                # ----- Massive case -------- #
                L2 = -1.0*lnPF(N_obs,N_obs)

                #------ Massless case ------#
                X_in1 = [log10(g)]
                res = minimize(llhood1, X_in1, args=(N_obs,R0))
                L1 = res.fun

                # Test statistic
                D12 = -2.0*(L2-L1) # significance for measuring mass
                if D12>9.0: # Median 3sigma detection -> D = 9
                    DL[im] = 10.0**(interp(9.0,[D12_prev,D12],[log10(g_prev),log10(g)]))
                    break
                g_prev = g # Reset for interpolation
                D12_prev = D12
    return DL
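
The break above fires once the test statistic crosses D12 = 9; the limit is then found by linearly interpolating log10(g) between the last two scan points. A standalone sketch of that step with made-up numbers:

from numpy import interp, log10

D12_prev, D12 = 4.0, 16.0   # hypothetical test-statistic values bracketing 9
g_prev, g = 1e-10, 2e-10    # hypothetical couplings at those points
g_limit = 10.0**interp(9.0, [D12_prev, D12], [log10(g_prev), log10(g)])
print(g_limit)              # lies between g_prev and g
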
Example #6
def test_callback():
    trace = []
    result = minimize(func, np.ones(3), callback=lambda x: trace.append(x.copy()))
    assert_allclose(result.x, (0, 1, 2), atol=1e-8)
    assert_allclose(result.fun, 1)
    assert result.nfev == len(trace)
    assert_allclose(trace[0], np.ones(3), atol=1e-2)
    assert_allclose(trace[-1], result.x, atol=1e-2)
Example #8
def test_bad_function():
    class Fcn:
        n = 0

        def __call__(self, x):
            self.n += 1
            return x**2 + 1e-4 * (self.n % 3)

    r = minimize(Fcn(), [1], options={"maxfun": 100000000})
    assert not r.success
    assert "Estimated distance to minimum too large" in r.message
Example #9
    def minimizeNLL(self, iPOI=-1, vPOI=None):
        '''
        Minimize the NLL, optionally keeping one POI constant.
        The POI is selected by its index iPOI and fixed to the
        value vPOI.
        '''

        # Function to minimize in case of full NLL
        def fullNLL(x):
            Bs = self._array2list(x)
            return self.NLL(Bs)

        # Function to minimize in case of one fixed POI
        def fixedNLL(x):
            b = np.zeros(self.nPOIs)
            b[:iPOI], b[iPOI], b[iPOI + 1:] = x[:iPOI], vPOI, x[iPOI:]
            Bs = self._array2list(b)
            return self.NLL(Bs)

        # Initial starting point as matrix inverted truth bins
        x0 = np.concatenate(self.Bs)

        # Final function to minimize
        nll = fullNLL

        if iPOI > -1:
            # Protection
            if iPOI >= self.nPOIs:
                msg = 'POI index ({}) must be lower than the number of POIs ({})'
                raise NameError(msg.format(iPOI, self.nPOIs))

            # Change the function to minimize
            nll = fixedNLL

            # Change the initial values
            x0 = np.delete(x0, iPOI)

        # Bounds
        xMax = np.max(x0) * 10
        xMin = np.min(x0) / 10
        bounds = [(xMin, xMax)] * x0.shape[0]

        # Minimization
        res = None
        if self.backend == 'scipy':
            res = optimize.minimize(nll,
                                    x0=x0,
                                    tol=1e-6,
                                    method='Powell',
                                    bounds=bounds)
        if self.backend == 'minuit':
            res = iminuit.minimize(nll, x0=x0, bounds=bounds)

        return res
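
A minimal usage sketch for profiling one POI with this method (the unfolder object below is a hypothetical instance of the surrounding class):

# full fit vs. fit with POI 0 fixed to 1.0
res_full = unfolder.minimizeNLL()
res_fixed = unfolder.minimizeNLL(iPOI=0, vPOI=1.0)

# profile-likelihood ratio test statistic for POI 0 at 1.0
q0 = 2 * (res_fixed.fun - res_full.fun)
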
Example #10
def calculate_critical_ts_from_gamma(
        ts, h0_ts_quantile, eta=3.0, xi=1.e-2):
    """Calculates the critical test-statistic value corresponding
    to h0_ts_quantile by fitting the ts distribution with a truncated
    gamma function.

    Parameters
    ----------
    ts : (n_trials,)-shaped 1D ndarray
        The ndarray holding the test-statistic values of the trials.
    h0_ts_quantile : float
        Null-hypothesis test statistic quantile.
    eta : float, optional
        Test-statistic value at which the gamma function is truncated
        from below.
    xi : float, optional
        A small number to numerically discriminate against ts=0.0.

    Returns
    -------
    critical_ts : float
    """
    Ntot = len(ts)
    N = len(ts[ts > xi])
    alpha = N/Ntot

    ts_eta = ts[ts > eta]
    N_prime = len(ts_eta)
    alpha_prime = N_prime/N

    obj = lambda x: truncated_gamma_logpdf(x[0], x[1], eta=eta,
                                           ts_above_eta=ts_eta,
                                           N_above_eta=N_prime)
    x0 = [0.75, 1.8]  # Initial values of function parameters.
    bounds = [[0.1, 10], [0.1, 10]]  # Ranges for the minimization fitter.
    r = minimize(obj, x0, bounds=bounds)
    pars = r.x
    
    norm = alpha*(alpha_prime/gamma.sf(eta, a=pars[0], scale=pars[1]))
    critical_ts = gamma.ppf(1 - 1./norm*h0_ts_quantile, a=pars[0], scale=pars[1])

    if critical_ts < eta:
        raise ValueError(
            'Critical ts value = %e, eta = %e. The calculation of the critical '
            'ts value from the fit is correct only for critical ts larger than '
            'the truncation threshold eta.' % (critical_ts, eta))

    return critical_ts
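
A minimal usage sketch with synthetic trials (the chi-square mixture below is made up purely for illustration; truncated_gamma_logpdf is assumed to come from the same module):

import numpy as np
from scipy.stats import chi2

rng = np.random.default_rng(0)
n_trials = 100000
# hypothetical null trials: roughly half exactly zero, half chi2(df=2)
ts = np.where(rng.random(n_trials) < 0.5, 0.0,
              chi2.rvs(df=2, size=n_trials, random_state=rng))

# critical ts for a ~3 sigma one-sided quantile
critical_ts = calculate_critical_ts_from_gamma(ts, h0_ts_quantile=1.35e-3)
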
Example #11
def test_args():
    result = minimize(func, np.ones(3), args=(5,))
    assert_allclose(result.x, (0, 1, 2), atol=1e-8)
    assert_allclose(result.fun, 5)
    assert result.nfev > 0
    assert result.njev == 0
Example #12
def test_gradient():
    result = minimize(func, (1, 1, 1), jac=grad)
    assert_allclose(result.x, (0, 1, 2), atol=1e-8)
    assert_allclose(result.fun, 1)
    assert result.nfev > 0
    assert result.njev > 0
Example #13
def test_simple():
    result = minimize(func, (1, 1, 1))
    assert_allclose(result.x, (0, 1, 2), atol=1e-8)
    assert_allclose(result.fun, 1)
    assert result.nfev > 0
    assert result.njev == 0
Example #15
def _standard_fit(x, y, func, silent=False, **kwargs):

    output = Fit_result()

    output.fit_function = func

    x = np.asarray(x)

    if x.shape[-1] != len(y):
        raise Exception('x and y input have to have the same length')

    if len(x.shape) > 2:
        raise Exception('Unknown format for x values')

    if not callable(func):
        raise TypeError('func has to be a function.')

    # probe func with increasing numbers of parameters to infer n_parms
    for i in range(25):
        try:
            func(np.arange(i), x.T[0])
        except Exception:
            pass
        else:
            break

    n_parms = i

    if not silent:
        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))

    y_f = [o.value for o in y]
    dy_f = [o.dvalue for o in y]

    if np.any(np.asarray(dy_f) <= 0.0):
        raise Exception('No y errors available, run the gamma method first.')

    if 'initial_guess' in kwargs:
        x0 = kwargs.get('initial_guess')
        if len(x0) != n_parms:
            raise Exception(
                'Initial guess does not have the correct length: %d vs. %d' %
                (len(x0), n_parms))
    else:
        x0 = [0.1] * n_parms

    if kwargs.get('correlated_fit') is True:
        cov = covariance_matrix(y)
        covdiag = np.diag(1. / np.sqrt(np.diag(cov)))
        corr = np.copy(cov)
        for i in range(len(y)):
            for j in range(len(y)):
                corr[i][j] = cov[i][j] / np.sqrt(cov[i][i] * cov[j][j])
        condn = np.linalg.cond(corr)
        if condn > 1e4:
            warnings.warn(
                "Correlation matrix may be ill-conditioned! condition number: %1.2e"
                % (condn), RuntimeWarning)
        chol = np.linalg.cholesky(corr)
        chol_inv = np.linalg.inv(chol)
        chol_inv = np.dot(chol_inv, covdiag)

        def chisqfunc(p):
            model = func(p, x)
            chisq = anp.sum(anp.dot(chol_inv, (y_f - model))**2)
            return chisq
    else:

        def chisqfunc(p):
            model = func(p, x)
            chisq = anp.sum(((y_f - model) / dy_f)**2)
            return chisq

    output.method = kwargs.get('method', 'Levenberg-Marquardt')
    if not silent:
        print('Method:', output.method)

    if output.method != 'Levenberg-Marquardt':
        if output.method == 'migrad':
            fit_result = iminuit.minimize(
                chisqfunc, x0,
                tol=1e-4)  # Stopping criterion: 0.002 * tol * errordef
            output.iterations = fit_result.nfev
        else:
            fit_result = scipy.optimize.minimize(chisqfunc,
                                                 x0,
                                                 method=kwargs.get('method'),
                                                 tol=1e-12)
            output.iterations = fit_result.nit

        chisquare = fit_result.fun

    else:
        if kwargs.get('correlated_fit') is True:

            def chisqfunc_residuals(p):
                model = func(p, x)
                chisq = anp.dot(chol_inv, (y_f - model))
                return chisq

        else:

            def chisqfunc_residuals(p):
                model = func(p, x)
                chisq = ((y_f - model) / dy_f)
                return chisq

        fit_result = scipy.optimize.least_squares(chisqfunc_residuals,
                                                  x0,
                                                  method='lm',
                                                  ftol=1e-15,
                                                  gtol=1e-15,
                                                  xtol=1e-15)

        chisquare = np.sum(fit_result.fun**2)

        output.iterations = fit_result.nfev

    if not fit_result.success:
        raise Exception('The minimization procedure did not converge.')

    if x.shape[-1] - n_parms > 0:
        output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms)
    else:
        output.chisquare_by_dof = float('nan')

    output.message = fit_result.message
    if not silent:
        print(fit_result.message)
        print('chisquare/d.o.f.:', output.chisquare_by_dof)

    if kwargs.get('expected_chisquare') is True:
        if kwargs.get('correlated_fit') is not True:
            W = np.diag(1 / np.asarray(dy_f))
            cov = covariance_matrix(y)
            A = W @ jacobian(func)(fit_result.x, x)
            P_phi = A @ np.linalg.inv(A.T @ A) @ A.T
            expected_chisquare = np.trace(
                (np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
            output.chisquare_by_expected_chisquare = chisquare / expected_chisquare
            if not silent:
                print('chisquare/expected_chisquare:',
                      output.chisquare_by_expected_chisquare)

    fitp = fit_result.x
    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))

    if kwargs.get('correlated_fit') is True:

        def chisqfunc_compact(d):
            model = func(d[:n_parms], x)
            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model))**2)
            return chisq

    else:

        def chisqfunc_compact(d):
            model = func(d[:n_parms], x)
            chisq = anp.sum(((d[n_parms:] - model) / dy_f)**2)
            return chisq

    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate(
        (fitp, y_f)))

    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]

    result = []
    for i in range(n_parms):
        result.append(
            derived_observable(
                lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) /
                (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i],
                list(y),
                man_grad=list(deriv[i])))

    output.fit_parameters = result

    output.chisquare = chisqfunc(fit_result.x)
    output.dof = x.shape[-1] - n_parms
    output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)

    if kwargs.get('resplot') is True:
        residual_plot(x, y, func, result)

    if kwargs.get('qqplot') is True:
        qqplot(x, y, func, result)

    return output
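
The correlated branch above whitens the residuals with the inverse Cholesky factor of the correlation matrix; a standalone sketch of that step (the covariance and residuals here are made up):

import numpy as np

cov = np.array([[1.0, 0.5],
                [0.5, 2.0]])                  # hypothetical covariance matrix
covdiag = np.diag(1.0 / np.sqrt(np.diag(cov)))
corr = covdiag @ cov @ covdiag                # correlation matrix
chol_inv = np.linalg.inv(np.linalg.cholesky(corr)) @ covdiag

residuals = np.array([0.3, -0.1])             # hypothetical y_f - model
chisq = np.sum((chol_inv @ residuals)**2)     # equals residuals @ inv(cov) @ residuals
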
Example #16
def GetLimits(m_vals, sigma_vals, HaloModel, Expt, Verbose=True):
    # Load neutrino backgrounds
    Background = NeutrinoFuncs.GetNuFluxes(Expt.EnergyThreshold, Expt.Nucleus)
    n_bg = Background.NumberOfNeutrinos
    R_bg = Background.Normalisations
    R_bg_err = Background.Uncertainties
    RD_nu = zeros(shape=(Expt.TotalNumberOfBins, n_bg))
    for i in range(0, n_bg):
        RD_nu[:, i] = LabFuncs.BinEvents(Expt, NeutrinoFuncs.NuRate,
                                         Background, i)
        RD_nu[:, i] *= (1.0 / R_bg[i])
    Background.RecoilDistribution(RD_nu)

    # Fix parameters for scan
    nm = size(m_vals)
    ns = size(sigma_vals)
    DL = zeros(shape=nm)
    X_in1 = zeros(shape=(n_bg + 1))
    N_bg = zeros(shape=Expt.TotalNumberOfBins)
    for i in range(0, n_bg):
        N_bg = N_bg + R_bg[i] * Background.RD[:, i]

    # MASS SCAN:
    for im in range(0, nm):
        Signal = Params.WIMP(m_vals[im], 1.0e-45)
        RD_wimp = LabFuncs.BinEvents(Expt, WIMPFuncs.WIMPRate, Signal,
                                     HaloModel)
        Signal.RecoilDistribution(RD_wimp / 1.0e-45)

        # CROSS SECTION SCAN
        D_prev = 0.0  # carry the previous (D01, sigma) across the scan for interpolation
        s_prev = sigma_vals[0]
        for j in range(0, ns):
            sigma_p = sigma_vals[j]
            N_signal = Signal.RD * sigma_p
            if sum(N_signal) > 0.5:  # Generally need >0.5 events to see DM
                # ------ Asimov data -----------#
                N_obs = N_signal + N_bg

                #------ Signal + Background ------#
                X_in1 = append(log10(sigma_p), R_bg)
                L1 = llhood1(X_in1, N_obs, Signal, Background)

                #------ Background only ------#
                X_in0 = R_bg
                step = R_bg_err
                res = minimize(llhood0, X_in0, args=(N_obs, Signal, Background),
                               options={'xtol': R_bg_err, 'eps': 2*step})
                L0 = res.fun
                #L0 = llhood0(X_in0,N_obs,Signal,Background)

                # Test statistic
                D01 = -2.0 * (L1 - L0)
                #print(j,sigma_p,D01,sum(N_signal),L1,L0)
                if D01 > 9.0:  # Median 3sigma detection -> D = 9
                    # Do interpolation to find discovery limit cross section
                    DL[im] = 10.0**(interp(9.0, array([D_prev, D01]),
                                           array([log10(s_prev), log10(sigma_p)])))
                    break
                s_prev = sigma_p  # Reset for interpolation
                D_prev = D01
        #Params.printProgressBar(im, nm)
        if Verbose:
            print("m_chi = ", m_vals[im], "| sigma_p = ", DL[im],
                  "| # Signal = ", sum(N_signal), "| # Background = ", sum(N_bg))
    return DL
Example #17
def test_hessinv():
    r = minimize(func, (1, 1, 1))
    href = np.zeros((3, 3))
    for i in range(3):
        href[i, i] = 0.5
    assert_allclose(r.hess_inv, href, atol=1e-8)
Example #19
    def fit_model(self, parameter_distribution, time,
                  fit_function_electrons, parameters_electron,
                  fit_function_proton=None, parameters_proton=None,
                  energy_range=None):
        """
        Perform a maximum likelihood fit of a given spectral model and return the best
        fit parameters, errors and likelihood

        :param parameter_distribution: ndarray
            Simulated classifier distribution (classifier, reconstructed energy)
        :param time: float
            Observation time (seconds)
        :param fit_function_electrons: function pointer
            Electron spectrum fit function
        :param parameters_electron:dict
            Electron spectral parameters and limits
        :param fit_function_proton: function pointer
            Proton spectrum fit function
        :param parameters_proton: dict
            Proton spectral parameters and limits
        :return: dict, dict, float
            Best fit values, fit errors, likelihood at fitted position
        """

        # Load everything into the class
        self.electron_fit_function = fit_function_electrons
        self.proton_fit_function = fit_function_proton
        self.total_distribution = parameter_distribution
        self.time = time
        self.energy_range = energy_range
        self.scale = 1

        # Concatenate the two dictionaries into one model
        fit_parameters = parameters_electron
        if parameters_proton is not None:
            fit_parameters.update(parameters_proton)

        # Copy starting values and limits into arrays
        self.variable_names = list(fit_parameters.keys())
        fit_parameters = list(fit_parameters.values())
        x, limits = [], []
        for p in fit_parameters:
            try:
                x.append(p[0])
                limits.append(p[1])
            except Exception:
                x.append(p)

        try:
            # Perform migrad minimisation
            minimised = minimize(self._get_poisson_likelihood_minimise, np.array(x),
                                 bounds=np.array(limits))
        except RuntimeError:
            return np.nan, np.nan, np.nan

        # Get the fit values
        fit_values = dict(list(zip(self.variable_names, list(minimised.values())[5])))
        likelihood = list(minimised.values())[4]
        minuit = minimised["minuit"]

        # Get the errors from MINOS
        fit_errors = dict()
        for key in self.variable_names:
            fit_errors[key] = (np.nan, np.nan)

        if False:
            try:
                minuit.minos()
                minos_errors = minuit.get_merrors()

                for x, key in zip(minos_errors, self.variable_names):
                    x = minos_errors[x]
                    fit_errors[key] = (np.abs(x["lower"]), np.abs(x["upper"]))

            except RuntimeError:
                # If we fail fill with NaN
                for key in self.variable_names:
                    fit_errors[key] = (np.nan, np.nan)

        return fit_values, fit_errors, likelihood
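
A minimal sketch of the parameter dictionaries this method unpacks (names and numbers are hypothetical; each entry is (start, (lower, upper)), matching the loop over fit_parameters above):

import numpy as np

def power_law(energy, norm, index):
    # hypothetical electron spectrum
    return norm * energy**-index

parameters_electron = {"norm": (1e-9, (1e-12, 1e-6)),
                       "index": (3.0, (1.0, 5.0))}

# values, errors, likelihood = fitter.fit_model(distribution, time,
#                                               power_law, parameters_electron)
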
Example #20
def test_eps():
    ref = minimize(func, (1, 1, 1))
    r = minimize(func, (1, 1, 1), options={"eps": 1e-10})
    assert np.any(ref.x != r.x)
    assert_allclose(r.x, ref.x, atol=1e-9)
Example #21
def test_method_warn():
    with pytest.warns(UserWarning):
        minimize(func, (1.5, 1.7, 1.5), method="foo")
Example #22
    def minimize(self, initials, bounds, func, func_args=None, **kwargs):
        """Minimizes the given function ``func`` with the given initial function
        argument values ``initials`` and within the given parameter bounds
        ``bounds``.

        Parameters
        ----------
        initials : 1D numpy ndarray
            The ndarray holding the initial values of all the fit parameters.
        bounds : 2D (N_fitparams,2)-shaped numpy ndarray
            The ndarray holding the boundary values (vmin, vmax) of the fit
            parameters.
        func : callable
            The function that should get minimized.
            The call signature must be

                ``__call__(x, *args)``

            The return value of ``func`` must be (f, grads), the function value
            at the function arguments ``x`` and the ndarray with the values of
            the function gradient for each fit parameter, if the
            ``func_provides_grads`` keyword argument option is set to True.
            If set to False, ``func`` must return only the function value.
        func_args : sequence | None
            Optional sequence of arguments for ``func``.

        Additional Keyword Arguments
        ----------------------------
        Additional keyword arguments include options for this minimizer
        implementation. Possible options are:

            func_provides_grads : bool
                Flag if the function ``func`` also returns its gradients.
                Default is ``True``.

        Any additional keyword arguments are passed on to the underlying
        :func:`iminuit.minimize` minimization function.

        Returns
        -------
        xmin : 1D ndarray
            The array containing the function arguments at the function's
            minimum.
        fmin : float
            The function value at its minimum.
        res : iminuit.OptimizeResult
            The iminuit OptimizeResult dictionary with additional information.
        """
        if func_args is None:
            func_args = tuple()

        func_provides_grads = kwargs.pop('func_provides_grads', True)

        if func_provides_grads:
            # The function func returns the function value and its gradients,
            # so we need to use the FuncWithGradsFunctor helper class.
            functor = FuncWithGradsFunctor(
                func=func,
                func_args=func_args)

            res = iminuit.minimize(
                fun=functor.get_f,
                x0=initials,
                bounds=bounds,
                jac=functor.get_grads,
                tol=self._ftol,
                **kwargs
            )
        else:
            # The function func returns only the function value, so we can
            # pass it to iminuit.minimize directly.
            res = iminuit.minimize(
                func,
                initials,
                bounds=bounds,
                args=func_args,
                tol=self._ftol,
                **kwargs
            )

        return (res.x, res.fun, res)
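
A minimal usage sketch for the default func_provides_grads=True path (the quadratic target and the minimizer instance are assumptions for illustration):

import numpy as np

def quadratic(x):
    # returns (function value, gradient), as the default path expects
    f = np.sum((x - 1.0)**2)
    grads = 2.0 * (x - 1.0)
    return (f, grads)

initials = np.array([0.0, 0.0])
bounds = np.array([[-5.0, 5.0], [-5.0, 5.0]])
# xmin, fmin, res = minimizer_impl.minimize(initials, bounds, quadratic)
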
Example #23
    # bind i via a default argument so each constraint keeps its own index
    cons += [{'type': 'ineq', 'fun': lambda x, i=i: -x[i] / min(
            x[i + 1],
            x[i - 1],
            x[i + 24],
            x[i - 24]) + 1/(1 - alpha)}]

# possible methods:
# L-BFGS-B, TNC
# COBYLA, SLSQP, trust-constr

mth = "trust-constr"

# rescale_matrix = starting_matrix(0.05)

res = minimize(
    chi2_cov,
    #                    rescale_matrix,
    starting_matrix(1),
    method=mth,
    bounds=bnd,
    constraints=cons,
    options={"maxiter": 2000})

#     rescale_matrix = res["x"]

savetxt("longer_1.dat", res["x"])

plot_matrix(res["x"], res["fun"])  # plotting results from file
ress = np.array(loadtxt("Minuit/matrices/migrad_punish_0.3.dat")).flatten()
plot_matrix(ress, chi2_cov(ress), titleTrue=False)
# plot_results(ress, "rescale_5")# # plotting results from recent calculations
# plot_matrix(res["x"], res["fun"])
# plot_results(res["x"], "rescale")# # how the results differ with changes of starting matrix?
# chi2_of_starting_param = []
Example #25
def test_method_warn():
    with pytest.raises(ValueError):
        minimize(func, (1.5, 1.7, 1.5), method="foo")
Example #26
def test_disp(capsys):
    minimize(lambda x: x**2, 0)
    assert capsys.readouterr()[0] == ""
    minimize(lambda x: x**2, 0, options={"disp": True})
    assert capsys.readouterr()[0] != ""
Example #27
def test_hess_warn():
    with pytest.warns(UserWarning):
        minimize(func, (1.5, 1.7, 1.5), hess=True)
Example #28
def test_unsupported():
    with pytest.raises(ValueError):
        minimize(func, (1, 1, 1), constraints=[])
    with pytest.raises(ValueError):
        minimize(func, (1, 1, 1), jac=True)
Example #29
def test_unreliable_uncertainties():
    r = minimize(func, (1.5, 1.7, 1.5), options={"stra": 0})
    assert (
        r.message ==
        "Optimization terminated successfully, but uncertainties are unrealiable."
    )
Example #30
def test_bad_function():
    r = minimize(lambda x: 0, 0)
    assert r.success is False
Example #31
def test_simplex():
    r = minimize(func, (1.5, 1.7, 1.5), method="simplex", tol=1e-4)
    assert r.success
    assert_allclose(r.x, (0, 1, 2), atol=2e-3)
Example #32
    def get_spectral_points(self, parameter_distribution, time,
                            fit_function_electrons, parameters_electron,
                            fit_function_proton=None, parameters_proton=None):
        """

        :param parameter_distribution: ndarray
            Simuated classifier distribution (classifier, reconstructed energy)
        :param time: float
            Observation time (seconds)
        """


        # Load everything into the class
        self.electron_fit_function = fit_function_electrons
        self.proton_fit_function = fit_function_proton
        self.total_distribution = parameter_distribution
        self.time = time

        # Concatenate the two dictionaries into one model
        fit_parameters = parameters_electron
        if parameters_proton is not None:
            fit_parameters.update(parameters_proton)

        # Copy starting values and limits into arrays
        self.variable_names = list(fit_parameters.keys())
        fit_parameters = list(fit_parameters.values())

        x, limits = [], []
        for p in fit_parameters:
            try:
                x.append(p[0])
                limits.append(p[1])
            except Exception:
                x.append(p)

        value_points, error_points = [], []
        for energy_bin in self.reco_energy_bins:
            e_range = (np.power(10, energy_bin - 0.001),
                       np.power(10, energy_bin + 0.001))  # avoid shadowing builtin range
            self.energy_range = e_range

            def scaled_spec(scale):
                self.scale = scale
                return self._get_poisson_likelihood_minimise(x)

            try:
                energy_bin = np.power(10, energy_bin)

                # Perform migrad minimisation
                minimised = minimize(scaled_spec, np.array([np.random.normal(1.0, 0.1)]),
                                     bounds=np.array([[0, 3]]))
                fit_value = list(minimised.values())[5][0]
                likelihood = list(minimised.values())[4]
                minuit = minimised["minuit"]

                # Get the errors from MINOS
                try:
                    minuit.minos()
                    errors = np.abs(list(minuit.get_merrors().values())[0]["lower"]), \
                             np.abs(list(minuit.get_merrors().values())[0]["upper"])

                except RuntimeError:
                    errors = (np.nan, np.nan)

                flux = fit_value * fit_function_electrons(energy_bin,
                                                         **parameters_electron)
                val_low = errors[0] * flux
                val_high = errors[1] * flux

                value_points.append(flux)
                error_points.append((val_low, val_high))

            except RuntimeError:
                value_points.append(np.nan)
                error_points.append((np.nan, np.nan))
        self.scale = 1

        return np.power(10,self.reco_energy_bins), np.array(value_points), \
               np.array(error_points).T
Example #33
def standard_fit(x, y, func, silent=False, **kwargs):
    """Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.

    x has to be a list of floats.
    y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.

    func has to be of the form

    def func(a, x):
        return a[0] + a[1] * x + a[2] * anp.sinh(x)

    For multiple x values func can be of the form

    def func(a, x):
        (x1, x2) = x
        return a[0] * x1 ** 2 + a[1] * x2

    It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
    will not work

    Keyword arguments
    -----------------
    dict_output -- If true, the output is a dictionary containing all relevant
                   data instead of just a list of the fit parameters.
    silent -- If true all output to the console is omitted (default False).
    initial_guess -- can provide an initial guess for the input parameters. Relevant for
                     non-linear fits with many parameters.
    method -- can be used to choose an alternative method for the minimization of chisquare.
              The possible methods are the ones which can be used for scipy.optimize.minimize and
              migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
              Reliable alternatives are migrad, Powell and Nelder-Mead.
    resplot -- If true, a plot which displays fit, data and residuals is generated (default False).
    qqplot -- If true, a quantile-quantile plot of the fit result is generated (default False).
    expected_chisquare -- If true prints the expected chisquare which is
                          corrected by effects caused by correlated input data.
                          This can take a while as the full correlation matrix
                          has to be calculated (default False).
    """

    result_dict = {}

    result_dict['fit_function'] = func

    x = np.asarray(x)

    if x.shape[-1] != len(y):
        raise Exception('x and y input have to have the same length')

    if len(x.shape) > 2:
        raise Exception('Unknown format for x values')

    if not callable(func):
        raise TypeError('func has to be a function.')

    for i in range(25):
        try:
            func(np.arange(i), x.T[0])
        except Exception:
            pass
        else:
            break

    n_parms = i
    if not silent:
        print('Fit with', n_parms, 'parameters')

    y_f = [o.value for o in y]
    dy_f = [o.dvalue for o in y]

    if np.any(np.asarray(dy_f) <= 0.0):
        raise Exception('No y errors available, run the gamma method first.')

    if 'initial_guess' in kwargs:
        x0 = kwargs.get('initial_guess')
        if len(x0) != n_parms:
            raise Exception('Initial guess does not have the correct length.')
    else:
        x0 = [0.1] * n_parms

    def chisqfunc(p):
        model = func(p, x)
        chisq = anp.sum(((y_f - model) / dy_f)**2)
        return chisq

    if 'method' in kwargs:
        result_dict['method'] = kwargs.get('method')
        if not silent:
            print('Method:', kwargs.get('method'))
        if kwargs.get('method') == 'migrad':
            fit_result = iminuit.minimize(chisqfunc, x0)
            fit_result = iminuit.minimize(chisqfunc, fit_result.x)
        else:
            fit_result = scipy.optimize.minimize(chisqfunc,
                                                 x0,
                                                 method=kwargs.get('method'))
            fit_result = scipy.optimize.minimize(chisqfunc,
                                                 fit_result.x,
                                                 method=kwargs.get('method'),
                                                 tol=1e-12)

        chisquare = fit_result.fun
    else:
        result_dict['method'] = 'Levenberg-Marquardt'
        if not silent:
            print('Method: Levenberg-Marquardt')

        def chisqfunc_residuals(p):
            model = func(p, x)
            chisq = ((y_f - model) / dy_f)
            return chisq

        fit_result = scipy.optimize.least_squares(chisqfunc_residuals,
                                                  x0,
                                                  method='lm',
                                                  ftol=1e-15,
                                                  gtol=1e-15,
                                                  xtol=1e-15)

        chisquare = np.sum(fit_result.fun**2)

    if not fit_result.success:
        raise Exception('The minimization procedure did not converge.')

    if x.shape[-1] - n_parms > 0:
        result_dict['chisquare/d.o.f.'] = chisquare / (x.shape[-1] - n_parms)
    else:
        result_dict['chisquare/d.o.f.'] = float('nan')

    if not silent:
        print(fit_result.message)
        print('chisquare/d.o.f.:', result_dict['chisquare/d.o.f.'])

    if kwargs.get('expected_chisquare') is True:
        W = np.diag(1 / np.asarray(dy_f))
        cov = covariance_matrix(y)
        A = W @ jacobian(func)(fit_result.x, x)
        P_phi = A @ np.linalg.inv(A.T @ A) @ A.T
        expected_chisquare = np.trace(
            (np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
        result_dict[
            'chisquare/expected_chisquare'] = chisquare / expected_chisquare
        if not silent:
            print('chisquare/expected_chisquare:',
                  result_dict['chisquare/expected_chisquare'])

    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fit_result.x))

    def chisqfunc_compact(d):
        model = func(d[:n_parms], x)
        chisq = anp.sum(((d[n_parms:] - model) / dy_f)**2)
        return chisq

    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate(
        (fit_result.x, y_f)))

    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]

    result = []
    for i in range(n_parms):
        result.append(
            derived_observable(
                lambda x, **kwargs: x[0],
                [
                    pseudo_Obs(fit_result.x[i], 0.0, y[0].names[0],
                               y[0].shape[y[0].names[0]])
                ] + list(y),
                man_grad=[0] + list(deriv[i])))

    result_dict['fit_parameters'] = result

    result_dict['chisquare'] = chisqfunc(fit_result.x)
    result_dict['d.o.f.'] = x.shape[-1] - n_parms

    if kwargs.get('resplot') is True:
        residual_plot(x, y, func, result)

    if kwargs.get('qqplot') is True:
        qqplot(x, y, func, result)

    return result_dict if kwargs.get('dict_output') else result
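
A minimal usage sketch (assuming pyerrors-style Obs inputs whose errors were already estimated with the gamma method; the model and data names are illustrative):

import autograd.numpy as anp  # numpy calls must go through autograd for the derivatives

def func(a, x):
    return a[0] + a[1] * anp.exp(-a[2] * x)

x = [1.0, 2.0, 3.0, 4.0]
# y must be a list of Obs with dvalue errors set (run the gamma method first)
# result = standard_fit(x, y, func, initial_guess=[1.0, 0.5, 0.1])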