def test_list_of_problems(self):
        list_of_problems = [
            Maratos(),
            Maratos(constr_hess='2-point'),
            Maratos(constr_hess=SR1()),
            Maratos(constr_jac='2-point', constr_hess=SR1()),
            MaratosGradInFunc(),
            HyperbolicIneq(),
            HyperbolicIneq(constr_hess='3-point'),
            HyperbolicIneq(constr_hess=BFGS()),
            HyperbolicIneq(constr_jac='3-point', constr_hess=BFGS()),
            Rosenbrock(),
            IneqRosenbrock(),
            EqIneqRosenbrock(),
            BoundedRosenbrock(),
            Elec(n_electrons=2),
            Elec(n_electrons=2, constr_hess='2-point'),
            Elec(n_electrons=2, constr_hess=SR1()),
            Elec(n_electrons=2, constr_jac='3-point', constr_hess=SR1())
        ]

        for prob in list_of_problems:
            for grad in (prob.grad, '3-point', False):
                for hess in (prob.hess, '3-point', SR1(),
                             BFGS(exception_strategy='damp_update'),
                             BFGS(exception_strategy='skip_update')):

                    # Skip combinations that would raise an exception
                    if grad in ('2-point', '3-point', 'cs', False) and \
                       hess in ('2-point', '3-point', 'cs'):
                        continue
                    if prob.grad is True and grad in ('3-point', False):
                        continue
                    with suppress_warnings() as sup:
                        sup.filter(UserWarning, "delta_grad == 0.0")
                        result = minimize(prob.fun,
                                          prob.x0,
                                          method='trust-constr',
                                          jac=grad,
                                          hess=hess,
                                          bounds=prob.bounds,
                                          constraints=prob.constr)

                    if prob.x_opt is not None:
                        assert_array_almost_equal(result.x,
                                                  prob.x_opt,
                                                  decimal=5)
                        # gtol
                        if result.status == 1:
                            assert_array_less(result.optimality, 1e-8)
                    # xtol
                    if result.status == 2:
                        assert_array_less(result.tr_radius, 1e-8)

                        if result.method == "tr_interior_point":
                            assert_array_less(result.barrier_parameter, 1e-8)
                    # max iter
                    if result.status in (0, 3):
                        raise RuntimeError("Invalid termination condition.")
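The snippets on this page all assume the same SciPy imports, typically along the lines of "from scipy.optimize import minimize, Bounds, LinearConstraint, NonlinearConstraint, BFGS, SR1". As a minimal, self-contained sketch of the pattern the test above exercises (the quadratic objective is hypothetical, not taken from the test suite):

import numpy as np
from scipy.optimize import minimize, Bounds, SR1

def fun(x):
    # hypothetical objective with its minimum at (1, 2)
    return (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2

res = minimize(fun,
               np.zeros(2),
               method='trust-constr',
               jac='2-point',   # gradient estimated by finite differences
               hess=SR1(),      # quasi-Newton Hessian approximation
               bounds=Bounds([-5.0, -5.0], [5.0, 5.0]))
print(res.x)  # approximately [1., 2.]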
Example #2
    def least_correlated_sub_matrix_by_optimization(self, corr_matrix,
                                                    max_dimension):

        self.corr_matrix = corr_matrix
        nr_vars = corr_matrix.columns.size

        A_mat = np.array([[1] * nr_vars])
        b_vec = np.array([max_dimension])

        linear_constraint = LinearConstraint(A_mat, b_vec, b_vec)
        bounds = Bounds([0] * nr_vars, [1] * nr_vars)

        x0 = np.array([1] * nr_vars)

        res = minimize(self._corr_quad,
                       x0,
                       method='trust-constr',
                       jac="2-point",
                       hess=SR1(),
                       constraints=[linear_constraint],
                       options={'verbose': 1},
                       bounds=bounds)

        x_res = res['x']
        x_res_sorted = np.sort(x_res)
        x_comp = (x_res >= x_res_sorted[nr_vars - max_dimension])
        x_hits = np.where(x_comp)
        index_hits = x_hits[0].tolist()

        return corr_matrix.iloc[index_hits, index_hits]
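A hedged usage sketch for the method above, assuming a pandas correlation matrix and an instance (here named selector, a hypothetical placeholder) of the class that defines least_correlated_sub_matrix_by_optimization and _corr_quad:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
returns = pd.DataFrame(rng.normal(size=(250, 6)), columns=list('ABCDEF'))
corr = returns.corr()
# 'selector' is a hypothetical instance of the surrounding class
sub = selector.least_correlated_sub_matrix_by_optimization(corr, max_dimension=3)
print(sub.columns.tolist())  # the 3 least mutually correlated columns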
Example #3
 def __init__(self, problem=None, method=None, debug=False):
     print("Initialising SciPy Solver")
     self.problem = problem
     self.debug = debug
     self.method = method
     self.hessian_update_strategy = SR1()
     self.max_iterations = 500
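A hedged construction sketch; the class name SciPySolver and the problem interface (start_state, N, pre_update(), use_bounds, get_bounds(), as used by solve() in Example #9 below) are assumptions inferred from the snippets on this page:

# 'SciPySolver' and 'my_problem' are hypothetical names
solver = SciPySolver(problem=my_problem, method='trust-constr', debug=True)
trajectory = solver.solve()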
Example #4
def find_optimal_mix(request: FindOptimalMixRequest) -> OptimalMixResponse:
    f = _build_cost_function(
        request.macros,
        request.target_macros,
        mix_dim=request.mix_dim,
        macros_dim=request.macros_dim,
    )
    bounds = Bounds(
        np.array(request.min_constraints, dtype=np.float64),
        np.array(request.max_constraints, dtype=np.float64),
    )
    initial_guess = np.full(request.mix_dim, 1.0)
    result = minimize(f,
                      initial_guess,
                      method='trust-constr',
                      jac="2-point",
                      hess=SR1(),
                      options={'verbose': 0},
                      bounds=bounds)
    optimal_mix = [round(m, 4) for m in result.x.tolist()]
    achieved_macros = _evaluate_mix(
        request.macros,
        mix=optimal_mix,
        mix_dim=request.mix_dim,
        macros_dim=request.macros_dim,
    )
    return OptimalMixResponse(
        optimal_mix=optimal_mix,
        macros=achieved_macros,
        square_error=float(result.fun),
    )
Example #5
    def test_hessian_initialization(self):
        quasi_newton = (BFGS(), SR1())

        for qn in quasi_newton:
            qn.initialize(5, 'hess')
            B = qn.get_matrix()

            assert_array_equal(B, np.eye(5))
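The same HessianUpdateStrategy API can also track the inverse Hessian; a minimal sketch:

import numpy as np
from scipy.optimize import BFGS

qn = BFGS()
qn.initialize(3, 'inv_hess')   # approximate the inverse Hessian instead
assert np.allclose(qn.get_matrix(), np.eye(3))  # identity until updates arrive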
Example #6
 def fit(self, feature, label):
     self.prev_proba = self.model.predict_proba(feature)[:, 1]
     self.y = label.astype(int)
     self.optimize_res = minimize(self.target_fun,
                                  self.beta,
                                  jac="2-point",
                                  hess=SR1(),
                                  method="trust-constr")
     print(self.optimize_res)
     print(self.global_mu)
     self.beta_ = self.optimize_res["x"][0]
Example #7
 def test_SR1_skip_update(self):
     # Define auxiliary problem
     prob = Rosenbrock(n=5)
     # Define iteration points
     x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
               [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
               [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
               [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
               [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
               [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
               [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
               [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
               [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
               [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
               [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
               [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
               [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
               [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
               [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
               [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
               [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
               [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
               [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]]
     # Compute gradients and differences between successive iterates
     grad_list = [prob.grad(x) for x in x_list]
     delta_x = [
         np.array(x_list[i + 1]) - np.array(x_list[i])
         for i in range(len(x_list) - 1)
     ]
     delta_grad = [
         grad_list[i + 1] - grad_list[i] for i in range(len(grad_list) - 1)
     ]
     hess = SR1(init_scale=1, min_denominator=1e-2)
     hess.initialize(len(x_list[0]), 'hess')
     # Apply updates for all but the last step
     for i in range(len(delta_x) - 1):
         s = delta_x[i]
         y = delta_grad[i]
         hess.update(s, y)
     # Test skip update
     B = np.copy(hess.get_matrix())
     s = delta_x[17]
     y = delta_grad[17]
     hess.update(s, y)
     B_updated = np.copy(hess.get_matrix())
     assert_array_equal(B, B_updated)
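For intuition: SR1 skips an update whenever the denominator (y - B s)^T s is small relative to ||s|| * ||y - B s||, which avoids a numerically unstable rank-1 correction. A minimal sketch that triggers the skip directly:

import numpy as np
from scipy.optimize import SR1

hess = SR1(init_scale=1, min_denominator=1e-2)
hess.initialize(2, 'hess')
s = np.array([1.0, 0.0])
y = np.array([1.0, 0.0])   # y == B @ s, so the SR1 denominator vanishes
hess.update(s, y)
print(hess.get_matrix())    # update was skipped: still the identity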
Example #8
    def least_correlated_sub_matrix_by_optimization_grouped(
            self, corr_matrix, max_dimension, markets_count):

        self.corr_matrix = corr_matrix
        nr_vars = corr_matrix.columns.size

        A_mat = np.zeros((len(max_dimension), nr_vars))
        market_index = self._create_start_end_index_markets(markets_count)
        for row, (start, end) in enumerate(market_index):
            A_mat[row, start:end + 1] = 1

        b_vec = np.array([m[1] for m in max_dimension])

        linear_constraint = LinearConstraint(A_mat, b_vec, b_vec)
        bounds = Bounds([0] * nr_vars, [1] * nr_vars)

        x0 = np.array([1] * nr_vars)

        res = minimize(self._corr_quad,
                       x0,
                       method='trust-constr',
                       jac="2-point",
                       hess=SR1(),
                       constraints=[linear_constraint],
                       options={'verbose': 1},
                       bounds=bounds)

        hits = []
        x_res = res['x']
        for counter, (start, end) in enumerate(market_index):
            x_res_sub = x_res[start:end + 1]
            x_res_sorted = np.sort(x_res_sub)
            x_comp = (x_res_sub >= x_res_sorted[markets_count[counter][1] -
                                                max_dimension[counter][1]])
            x_hits = np.where(x_comp)
            index_hits = [h + start for h in x_hits[0].tolist()]
            print(index_hits)
            print(corr_matrix.iloc[index_hits, index_hits])

            hits += index_hits

        return corr_matrix.iloc[hits, hits]
Example #9
    def solve(self):
        # Extract start state
        x0 = self.problem.start_state.copy()

        self.problem.pre_update()

        # Add constraints
        cons = []
        if self.method != "trust-constr":
            if (self.neq_constraint_fun(np.zeros((self.problem.N,))).shape[0] > 0):
                cons.append({'type': 'ineq', 'fun': self.neq_constraint_fun, 'jac': self.neq_constraint_jac})
            
            if (self.eq_constraint_fun(np.zeros((self.problem.N,))).shape[0] > 0):
                cons.append({'type': 'eq', 'fun': self.eq_constraint_fun, 'jac': self.eq_constraint_jac})
        else:
            if (self.neq_constraint_fun(np.zeros((self.problem.N,))).shape[0] > 0):
                cons.append(NonlinearConstraint(self.neq_constraint_fun, 0., np.inf, jac=self.neq_constraint_jac, hess=SR1()))
            
            if (self.eq_constraint_fun(np.zeros((self.problem.N,))).shape[0] > 0):
                cons.append(NonlinearConstraint(self.eq_constraint_fun, 0., 0., jac=self.eq_constraint_jac, hess=SR1()))
            

        # Bounds
        bounds = None
        if self.problem.use_bounds:
            bounds = Bounds(self.problem.get_bounds()[:,0], self.problem.get_bounds()[:,1])

        s = time()
        res = minimize(self.cost_fun,
                       x0,
                       method=self.method,
                       bounds=bounds,
                       jac=True,
                       hess=SR1(),
                       constraints=cons,
                       options={'disp': self.debug,
                                'initial_tr_radius': 1000.,
                                'maxiter': self.max_iterations})
        e = time()
        if self.debug:
            print(e-s, res.x)

        return [res.x]
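Because solve() calls minimize with jac=True, the objective is expected to return the value and its gradient together. A hedged sketch of a compatible cost_fun (the quadratic is a stand-in, not the class's real cost):

import numpy as np

def cost_fun(x):
    f = float(np.sum(x ** 2))   # objective value
    g = 2.0 * x                 # gradient, same shape as x
    return f, g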
Example #10
def fullfill_constraints_2(x0):
    res = minimize(fake,
                   x0,
                   method='trust-constr',
                   jac="2-point",
                   hess=SR1(),
                   constraints=[
                       linear_constraint, nonlinear_constraint,
                       nonlinear_constraint_gibbs
                   ],
                   options={
                       'verbose': 0,
                       'initial_constr_penalty': 10,
                       'gtol': 1E-4,
                       'maxiter': 1,
                       'xtol': -1
                   },
                   bounds=bounds,
                   tol=1E-6)
    success = res.constr_violation < 0.0005
    return res, success
Example #11
def minimize_gibbs_free_energy(Total_Free_Energy_func,
                               x0,
                               max_constr_violation=0.01):
    res = minimize(Total_Free_Energy_func,
                   x0,
                   method='trust-constr',
                   jac="2-point",
                   hess=SR1(),
                   constraints=[
                       linear_constraint, nonlinear_constraint,
                       nonlinear_constraint_gibbs
                   ],
                   options={
                       'verbose': 0,
                       'gtol': 1E-3,
                       'maxiter': maxiter,
                       'xtol': 5E-4
                   },
                   bounds=bounds)
    if res.constr_violation > max_constr_violation:
        res.success = False
    return res
Example #12
    def test_hessian_initialization(self):
        ndims = 5
        rnd_matrix = np.random.randint(1, 50, size=(ndims, ndims))
        init_scales = [
            (None, np.eye(ndims)),
            (2, np.eye(ndims) * 2),
            (np.array(range(1, ndims + 1)),
             np.eye(ndims) * np.array(range(1, ndims + 1))),
            (rnd_matrix, rnd_matrix),
        ]
        for init_scale, true_matrix in init_scales:
            quasi_newton = (BFGS(init_scale=init_scale),
                            SR1(init_scale=init_scale))

            for qn in quasi_newton:
                qn.initialize(ndims, 'hess')
                B = qn.get_matrix()

                assert_array_equal(B, true_matrix)
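Following the behavior this test asserts, a non-default init_scale fixes the initial approximation directly; a one-off sketch:

from scipy.optimize import SR1

qn = SR1(init_scale=2.0)
qn.initialize(4, 'hess')
print(qn.get_matrix())   # per the test above: 2 * identity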
Example #13
def opt_der(u):
    der = np.empty_like(u)
    der[0] = 2 * u[0]
    der[1] = 2 * u[1]
    der[2] = 2 * u[2]
    der[3] = 2 * u[3]
    return der


# Initial control vector (not known a priori).
u0 = np.full(4, 0.1)

# Optimize with a predefined Jacobian (analytic derivative):
# res = minimize(opt_fun, u0, method='trust-constr', jac=opt_der, hess=SR1(),
#     constraints=[lin_constr], bounds=bounds, options={'verbose':1})

# We can also estimate the Jacobian with finite differences and approximate the Hessian with SR1.
res = minimize(opt_fun,
               u0,
               method='trust-constr',
               jac='2-point',
               hess=SR1(),
               constraints=[lin_constr],
               bounds=bounds,
               options={'verbose': 1})

print('final u: ', res.x)
print('result torque: ', np.dot(A, res.x))
print('desired torque: ', M)
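The opt_fun, lin_constr, bounds, A, and M used above are not shown in this snippet; a hedged reconstruction of a compatible setup (all numbers are placeholders):

import numpy as np
from scipy.optimize import Bounds, LinearConstraint

def opt_fun(u):
    return float(np.sum(u ** 2))          # consistent with opt_der above

A = np.array([[1.0, 0.5, -0.5, -1.0]])    # placeholder actuator-to-torque map
M = np.array([0.2])                       # placeholder desired torque
lin_constr = LinearConstraint(A, M, M)    # enforce A @ u == M
bounds = Bounds(np.zeros(4), np.ones(4))  # each control effort in [0, 1]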
Example #14
def fit_meta_d_MLE(nR_S1, nR_S2, s=1, fncdf=norm.cdf, fninv=norm.ppf):
    """Estimate meta-d'.

    Parameters
    ----------
    nR_S1, nR_S2 : list or 1d array-like
        These are vectors containing the total number of responses in
        each response category, conditional on presentation of S1 and S2.

        e.g. if nR_S1 = [100 50 20 10 5 1], then when stimulus S1 was
        presented, the subject had the following response counts:
            responded S1, rating=3 : 100 times
            responded S1, rating=2 : 50 times
            responded S1, rating=1 : 20 times
            responded S2, rating=1 : 10 times
            responded S2, rating=2 : 5 times
            responded S2, rating=3 : 1 time

        The ordering of response / rating counts for S2 should be the same as
        it is for S1. e.g. if nR_S2 = [3 7 8 12 27 89], then when stimulus S2
        was presented, the subject had the following response counts:
            responded S1, rating=3 : 3 times
            responded S1, rating=2 : 7 times
            responded S1, rating=1 : 8 times
            responded S2, rating=1 : 12 times
            responded S2, rating=2 : 27 times
            responded S2, rating=3 : 89 times

    Returns
    -------
    fit : dict

        In the following, let S1 and S2 represent the distributions of evidence
        generated by stimulus classes S1 and S2.

        fit.da = mean(S2) - mean(S1), in root-mean-square(sd(S1),sd(S2)) units
        fit.s = sd(S1) / sd(S2)
        fit.meta_da = meta-d' in RMS units
        fit.M_diff = meta_da - da
        fit.M_ratio = meta_da / da
        fit.meta_ca = type 1 criterion for meta-d' fit, RMS units
        fit.t2ca_rS1 = type 2 criteria of "S1" responses for meta-d' fit, RMS units
        fit.t2ca_rS2 = type 2 criteria of "S2" responses for meta-d' fit, RMS units

        fit.S1units = contains same parameters in sd(S1) units.
                        these may be of use since the data-fitting is conducted
                        using parameters specified in sd(S1) units.

        fit.logL = log likelihood of the data fit

        fit.est_HR2_rS1 = estimated (from meta-d' fit) type 2 hit rates for S1 responses
        fit.obs_HR2_rS1 = actual type 2 hit rates for S1 responses
        fit.est_FAR2_rS1 = estimated type 2 false alarm rates for S1 responses
        fit.obs_FAR2_rS1 = actual type 2 false alarm rates for S1 responses

        fit.est_HR2_rS2 = estimated type 2 hit rates for S2 responses
        fit.obs_HR2_rS2 = actual type 2 hit rates for S2 responses
        fit.est_FAR2_rS2 = estimated type 2 false alarm rates for S2 responses
        fit.obs_FAR2_rS2 = actual type 2 false alarm rates for S2 responses

    Notes
    -----
    Given data from an experiment where an observer discriminates between two
    stimulus alternatives on every trial and provides confidence ratings, this
    function provides a type 2 SDT analysis of the data.

    N.B. if nR_S1 or nR_S2 contain zeros, this may interfere with estimation of
    meta-d'.

    Some options for dealing with response cell counts containing zeros are:

    (1) Add a small adjustment factor, e.g. adj_f = 1/length(nR_S1), to each
    input vector:

    adj_f = 1/length(nR_S1);
    nR_S1_adj = nR_S1 + adj_f;
    nR_S2_adj = nR_S2 + adj_f;

    This is a generalization of the correction for similar estimation issues of
    type 1 d' as recommended in

    Hautus, M. J. (1995). Corrections for extreme proportions and their biasing
        effects on estimated values of d'. Behavior Research Methods,
        Instruments, & Computers, 27, 46-51.

    When using this correction method, it is recommended to add the adjustment
    factor to ALL data for all subjects, even for those subjects whose data is
    not in need of such correction, in order to avoid biases in the analysis
    (cf Snodgrass & Corwin, 1988).

    (2) Collapse across rating categories.

    e.g. if your data set has 4 possible confidence ratings such that
    length(nR_S1)==8, defining new input vectors

    nR_S1_new = [sum(nR_S1(1:2)), sum(nR_S1(3:4)), sum(nR_S1(5:6)), sum(nR_S1(7:8))];
    nR_S2_new = [sum(nR_S2(1:2)), sum(nR_S2(3:4)), sum(nR_S2(5:6)), sum(nR_S2(7:8))];

    might be sufficient to eliminate zeros from the input without using an
    adjustment.

    * s
    this is the ratio of standard deviations for type 1 distributions, i.e.

    s = sd(S1) / sd(S2)

    if not specified, s is set to a default value of 1.
    For most purposes, we recommend setting s = 1.
    See http://www.columbia.edu/~bsm2105/type2sdt for further discussion.

    * fncdf
    a function handle for the CDF of the type 1 distribution.
    if not specified, fncdf defaults to norm.cdf (i.e. the CDF of the normal
    distribution)

    * fninv
    a function handle for the inverse CDF of the type 1 distribution.
    if not specified, fninv defaults to norm.ppf

    If there are N ratings, then there will be N-1 type 2 hit rates and false
    alarm rates.

    Examples
    --------
    >>> nR_S1 = [36, 24, 17, 20, 10, 12, 9, 2]
    >>> nR_S2 = [1, 4, 10, 11, 19, 18, 28, 39]
    >>> fit = fit_meta_d_MLE(nR_S1,nR_S2)

    References
    ----------
    Adapted from the transcription of fit_meta_d_MLE.m (Maniscalco & Lau, 2012)
    by Alan Lee.
    """
    if (len(nR_S1) % 2) != 0:
        raise ValueError('input arrays must have an even number of elements')
    if len(nR_S1) != len(nR_S2):
        raise ValueError('input arrays must have the same number of elements')
    if any(np.array(nR_S1) == 0) or any(np.array(nR_S2) == 0):
        print(' ')
        print('WARNING!!')
        print('---------')
        print('Your inputs')
        print(' ')
        print('nR_S1:')
        print(nR_S1)
        print('nR_S2:')
        print(nR_S2)
        print(' ')
        print(
            "contain zeros! This may interfere with proper estimation of meta-d'.")
        print("See 'help fit_meta_d_MLE' for more information.")
        print(' ')
        print(' ')

    nRatings = int(len(nR_S1) / 2)  # number of ratings in the experiment
    nCriteria = int(2 * nRatings - 1)  # number criteria to be fitted

    # parameters
    # meta-d' - 1
    # t2c     - nCriteria-1
    # constrain type 2 criteria values,
    # such that t2c(i) is always <= t2c(i+1)
    # want t2c(i)   <= t2c(i+1)
    # -->  t2c(i+1) >= t2c(i) + 1e-5 (i.e. very small deviation from equality)
    # -->  t2c(i) - t2c(i+1) <= -1e-5
    A = []
    ub = []
    lb = []
    for ii in range(nCriteria - 2):
        tempArow = []
        tempArow.extend(np.zeros(ii + 1))
        tempArow.extend([1, -1])
        tempArow.extend(np.zeros((nCriteria - 2) - ii - 1))
        A.append(tempArow)
        ub.append(-1e-5)
        lb.append(-np.inf)

    # lower bounds on parameters
    LB = []
    LB.append(-10.)  # meta-d'
    LB.extend(-20 * np.ones((nCriteria - 1) // 2))  # criteria lower than t1c
    LB.extend(np.zeros((nCriteria - 1) // 2))  # criteria higher than t1c

    # upper bounds on parameters
    UB = []
    UB.append(10.)  # meta-d'
    UB.extend(np.zeros((nCriteria - 1) // 2))  # criteria lower than t1c
    UB.extend(20 * np.ones((nCriteria - 1) // 2))  # criteria higher than t1c

    # select constant criterion type
    constant_criterion = 'meta_d1 * (t1c1 / d1)'  # relative criterion

    # set up initial guess at parameter values
    ratingHR = []
    ratingFAR = []
    for c in range(1, int(nRatings * 2)):
        ratingHR.append(sum(nR_S2[c:]) / sum(nR_S2))
        ratingFAR.append(sum(nR_S1[c:]) / sum(nR_S1))

    # obtain index in the criteria array to mark Type I and Type II criteria
    t1_index = nRatings - 1
    t2_index = list(set(list(range(0, 2 * nRatings - 1))) - set([t1_index]))

    d1 = (1 / s) * fninv(ratingHR[t1_index]) - fninv(ratingFAR[t1_index])
    meta_d1 = d1

    c1 = (-1 / (1 + s)) * (fninv(ratingHR) + fninv(ratingFAR))
    t1c1 = c1[t1_index]
    t2c1 = c1[t2_index]

    # initial values for the minimization function
    guess = [meta_d1]
    guess.extend(list(t2c1 - eval(constant_criterion)))

    # other inputs for the minimization function
    inputObj = [
        nR_S1, nR_S2, nRatings, d1, t1c1, s, constant_criterion, fncdf, fninv
    ]
    bounds = Bounds(LB, UB)
    linear_constraint = LinearConstraint(A, lb, ub)

    # minimization of negative log-likelihood
    results = minimize(fit_meta_d_logL,
                       guess,
                       args=(inputObj),
                       method='trust-constr',
                       jac='2-point',
                       hess=SR1(),
                       constraints=[linear_constraint],
                       options={'verbose': 1},
                       bounds=bounds)

    # quickly process some of the output
    meta_d1 = results.x[0]
    t2c1 = results.x[1:] + eval(constant_criterion)
    logL = -results.fun

    # data is fit, now to package it...
    # find observed t2FAR and t2HR

    # I_nR and C_nR are rating trial counts for incorrect and correct trials
    # element i corresponds to # (in)correct w/ rating i
    I_nR_rS2 = nR_S1[nRatings:]
    I_nR_rS1 = list(np.flip(nR_S2[0:nRatings], axis=0))

    C_nR_rS2 = nR_S2[nRatings:]
    C_nR_rS1 = list(np.flip(nR_S1[0:nRatings], axis=0))

    obs_FAR2_rS2 = [
        sum(I_nR_rS2[(i + 1):]) / sum(I_nR_rS2) for i in range(nRatings - 1)
    ]
    obs_HR2_rS2 = [
        sum(C_nR_rS2[(i + 1):]) / sum(C_nR_rS2) for i in range(nRatings - 1)
    ]
    obs_FAR2_rS1 = [
        sum(I_nR_rS1[(i + 1):]) / sum(I_nR_rS1) for i in range(nRatings - 1)
    ]
    obs_HR2_rS1 = [
        sum(C_nR_rS1[(i + 1):]) / sum(C_nR_rS1) for i in range(nRatings - 1)
    ]

    # find estimated t2FAR and t2HR
    S1mu = -meta_d1 / 2
    S1sd = 1
    S2mu = meta_d1 / 2
    S2sd = S1sd / s

    mt1c1 = eval(constant_criterion)

    C_area_rS2 = 1 - fncdf(mt1c1, S2mu, S2sd)
    I_area_rS2 = 1 - fncdf(mt1c1, S1mu, S1sd)

    C_area_rS1 = fncdf(mt1c1, S1mu, S1sd)
    I_area_rS1 = fncdf(mt1c1, S2mu, S2sd)

    est_FAR2_rS2 = []
    est_HR2_rS2 = []

    est_FAR2_rS1 = []
    est_HR2_rS1 = []

    for i in range(nRatings - 1):

        t2c1_lower = t2c1[(nRatings - 1) - (i + 1)]
        t2c1_upper = t2c1[(nRatings - 1) + i]

        I_FAR_area_rS2 = 1 - fncdf(t2c1_upper, S1mu, S1sd)
        C_HR_area_rS2 = 1 - fncdf(t2c1_upper, S2mu, S2sd)

        I_FAR_area_rS1 = fncdf(t2c1_lower, S2mu, S2sd)
        C_HR_area_rS1 = fncdf(t2c1_lower, S1mu, S1sd)

        est_FAR2_rS2.append(I_FAR_area_rS2 / I_area_rS2)
        est_HR2_rS2.append(C_HR_area_rS2 / C_area_rS2)

        est_FAR2_rS1.append(I_FAR_area_rS1 / I_area_rS1)
        est_HR2_rS1.append(C_HR_area_rS1 / C_area_rS1)

    # package output
    fit = {}
    fit['da'] = np.sqrt(2 / (1 + s**2)) * s * d1

    fit['s'] = s

    fit['meta_da'] = np.sqrt(2 / (1 + s**2)) * s * meta_d1

    fit['M_diff'] = fit['meta_da'] - fit['da']

    fit['M_ratio'] = fit['meta_da'] / fit['da']

    mt1c1 = eval(constant_criterion)
    fit['meta_ca'] = (np.sqrt(2) * s / np.sqrt(1 + s**2)) * mt1c1

    t2ca = (np.sqrt(2) * s / np.sqrt(1 + s**2)) * np.array(t2c1)
    fit['t2ca_rS1'] = t2ca[0:nRatings - 1]
    fit['t2ca_rS2'] = t2ca[(nRatings - 1):]

    fit['S1units'] = {}
    fit['S1units']['d1'] = d1
    fit['S1units']['meta_d1'] = meta_d1
    fit['S1units']['s'] = s
    fit['S1units']['meta_c1'] = mt1c1
    fit['S1units']['t2c1_rS1'] = t2c1[0:nRatings - 1]
    fit['S1units']['t2c1_rS2'] = t2c1[(nRatings - 1):]

    fit['logL'] = logL

    fit['est_HR2_rS1'] = est_HR2_rS1
    fit['obs_HR2_rS1'] = obs_HR2_rS1

    fit['est_FAR2_rS1'] = est_FAR2_rS1
    fit['obs_FAR2_rS1'] = obs_FAR2_rS1

    fit['est_HR2_rS2'] = est_HR2_rS2
    fit['obs_HR2_rS2'] = obs_HR2_rS2

    fit['est_FAR2_rS2'] = est_FAR2_rS2
    fit['obs_FAR2_rS2'] = obs_FAR2_rS2

    return fit
Example #15
def fit_rs_meta_d_MLE(nR_S1, nR_S2, s=1, fncdf=norm.cdf, fninv=norm.ppf):

    # check inputs
    if (len(nR_S1) % 2) != 0:
        raise ValueError('input arrays must have an even number of elements')
    if len(nR_S1) != len(nR_S2):
        raise ValueError('input arrays must have the same number of elements')
    if any(np.array(nR_S1) == 0) or any(np.array(nR_S2) == 0):
        print(' ')
        print('WARNING!!')
        print('---------')
        print('Your inputs')
        print(' ')
        print('nR_S1:')
        print(nR_S1)
        print('nR_S2:')
        print(nR_S2)
        print(' ')
        print(
            "contain zeros! This may interfere with proper estimation of meta-d'.")
        print("See 'help fit_meta_d_MLE' for more information.")
        print(' ')
        print(' ')

    nRatings = int(len(nR_S1) / 2)  # number of ratings in the experiment
    nCriteria = int(2 * nRatings - 1)  # number criteria to be fitted

    # find actual type 2 FAR and HR (data to be fit)
    # I_nR and C_nR are rating trial counts for incorrect and correct trials
    I_nR_rS2 = nR_S1[nRatings:]
    I_nR_rS1 = (nR_S2[:nRatings])[::-1]

    C_nR_rS2 = nR_S2[nRatings:]
    C_nR_rS1 = (nR_S1[:nRatings])[::-1]

    t2FAR_rS2 = []
    t2HR_rS2 = []
    t2FAR_rS1 = []
    t2HR_rS1 = []
    for i in range(1, nRatings):
        t2FAR_rS2.append(sum(I_nR_rS2[i:]) / sum(I_nR_rS2))
        t2HR_rS2.append(sum(C_nR_rS2[i:]) / sum(C_nR_rS2))

        t2FAR_rS1.append(sum(I_nR_rS1[i:]) / sum(I_nR_rS1))
        t2HR_rS1.append(sum(C_nR_rS1[i:]) / sum(C_nR_rS1))
    """
    set up constraints for scipy.optimize.minimize()
    """
    # parameters
    # meta-d' - 1
    # t2c     - (nCriteria - 1) / 2

    A = []
    ub = []
    lb = []

    nCritPerResp = (nCriteria - 1) // 2

    for crit in range(nCritPerResp - 1):
        # c(crit) <= c(crit+1) --> c(crit) - c(crit+1) <= .001
        tempArow = []
        tempArow.extend(np.zeros(crit + 1))
        tempArow.extend([1, -1])
        tempArow.extend(np.zeros((nCritPerResp - 1) - crit - 1))
        A.append(tempArow)
        ub.append(-.001)
        lb.append(-np.inf)

    LB_rS1 = []
    LB_rS1.append(-10.)  # meta-d' rS1
    LB_rS1.extend(-20 * np.ones(nCritPerResp))  # criteria lower than t1c

    UB_rS1 = []
    UB_rS1.append(10.)  # meta-d' rS1
    UB_rS1.extend(np.zeros(nCritPerResp))  # criteria lower than t1c

    LB_rS2 = []
    LB_rS2.append(-10.)  # meta-d' rS2
    LB_rS2.extend(np.zeros(nCritPerResp))  # criteria higher than t1c

    UB_rS2 = []
    UB_rS2.append(10.)  # meta-d' rS2
    UB_rS2.extend(20 * np.ones(nCritPerResp))  # criteria higher than t1c
    """
    prepare other inputs for scipy.optimize.minimize()
    """
    # select constant criterion type
    constant_criterion_rS1 = 'meta_d1_rS1 * (t1c1 / d1)'  # relative criterion
    constant_criterion_rS2 = 'meta_d1_rS2 * (t1c1 / d1)'  # relative criterion

    # set up initial guess at parameter values
    ratingHR = []
    ratingFAR = []
    for c in range(1, int(nRatings * 2)):
        ratingHR.append(sum(nR_S2[c:]) / sum(nR_S2))
        ratingFAR.append(sum(nR_S1[c:]) / sum(nR_S1))

    # obtain index in the criteria array to mark Type I and Type II criteria
    t1_index = nRatings - 1
    t2_index = list(set(list(range(0, 2 * nRatings - 1))) - set([t1_index]))

    d1 = (1 / s) * fninv(ratingHR[t1_index]) - fninv(ratingFAR[t1_index])
    meta_d1_rS1 = d1
    meta_d1_rS2 = d1

    c1 = (-1 / (1 + s)) * (fninv(ratingHR) + fninv(ratingFAR))
    t1c1 = c1[t1_index]
    t2c1 = c1[t2_index]

    # initial values for the minimization function
    guess_rS1 = [meta_d1_rS1]
    guess_rS1.extend(list(t2c1[:nCritPerResp] - eval(constant_criterion_rS1)))
    guess_rS2 = [meta_d1_rS2]
    guess_rS2.extend(list(t2c1[nCritPerResp:] - eval(constant_criterion_rS2)))

    # other inputs for the minimization function
    inputObj_rS1 = [
        nR_S1, nR_S2, nRatings, d1, t1c1, s, constant_criterion_rS1,
        constant_criterion_rS2, fncdf, fninv
    ]
    bounds_rS1 = Bounds(LB_rS1, UB_rS1)
    linear_constraint = LinearConstraint(A, lb, ub)

    # minimization of negative log-likelihood for rS1
    results = minimize(fitM_rS1_logL,
                       guess_rS1,
                       args=(inputObj_rS1),
                       method='trust-constr',
                       jac='2-point',
                       hess=SR1(),
                       constraints=[linear_constraint],
                       options={'verbose': 1},
                       bounds=bounds_rS1)

    # quickly process some of the output
    meta_d1_rS1 = results.x[0]
    meta_c1_rS1 = eval(constant_criterion_rS1)

    t2c1_rS1 = results.x[1:] + eval(constant_criterion_rS1)
    logL_rS1 = -results.fun

    ## find model-estimated type 2 FAR and HR for S1 responses

    # find the estimated type 2 FAR and HR
    S1mu = -meta_d1_rS1 / 2
    S1sd = 1
    S2mu = meta_d1_rS1 / 2
    S2sd = S1sd / s

    # adjust so that everything is centered on t1c1 = 0
    h = 1 - norm.cdf(0, S2mu, S2sd)
    f = 1 - norm.cdf(0, S1mu, S1sd)

    # this is the value of c1 midway b/t S1 and S2
    shift_c1 = (-1 / (1 + s)) * (norm.ppf(h) + norm.ppf(f))

    # shift S1 and S2mu so that they lie on an axis for 0 --> c1=0
    S1mu = S1mu + shift_c1
    S2mu = S2mu + shift_c1

    C_area_rS1 = fncdf(meta_c1_rS1, S1mu, S1sd)
    I_area_rS1 = fncdf(meta_c1_rS1, S2mu, S2sd)

    est_t2FAR_rS1 = []
    est_t2HR_rS1 = []

    for i in range(len(t2c1_rS1)):
        t2c1_lower = t2c1_rS1[i]

        I_FAR_area_rS1 = fncdf(t2c1_lower, S2mu, S2sd)
        C_HR_area_rS1 = fncdf(t2c1_lower, S1mu, S1sd)

        est_t2FAR_rS1.append(I_FAR_area_rS1 / I_area_rS1)
        est_t2HR_rS1.append(C_HR_area_rS1 / C_area_rS1)

    ## fit for S2 responses

    # find the best fit for type 2 hits and FAs
    inputObj_rS2 = [
        nR_S1, nR_S2, nRatings, d1, t1c1, s, constant_criterion_rS1,
        constant_criterion_rS2, fncdf, fninv
    ]
    bounds_rS2 = Bounds(LB_rS2, UB_rS2)
    linear_constraint = LinearConstraint(A, lb, ub)

    # minimization of negative log-likelihood for rS2
    results = minimize(fitM_rS2_logL,
                       guess_rS2,
                       args=(inputObj_rS2),
                       method='trust-constr',
                       jac='2-point',
                       hess=SR1(),
                       constraints=[linear_constraint],
                       options={'verbose': 1},
                       bounds=bounds_rS2)

    # quickly process some of the output
    meta_d1_rS2 = results.x[0]
    meta_c1_rS2 = eval(constant_criterion_rS2)

    t2c1_rS2 = results.x[1:] + eval(constant_criterion_rS2)
    logL_rS2 = -results.fun

    ## find the estimated type 2 FAR and HR

    S1mu = -meta_d1_rS2 / 2
    S1sd = 1
    S2mu = meta_d1_rS2 / 2
    S2sd = S1sd / s

    # adjust so that everything is centered on t1c1 = 0
    h = 1 - norm.cdf(0, S2mu, S2sd)
    f = 1 - norm.cdf(0, S1mu, S1sd)

    # this is the value of c1 midway b/t S1 and S2
    shift_c1 = (-1 / (1 + s)) * (norm.ppf(h) + norm.ppf(f))

    # shift S1 and S2mu so that they lie on an axis for 0 --> c1=0
    S1mu = S1mu + shift_c1
    S2mu = S2mu + shift_c1

    C_area_rS2 = fncdf(meta_c1_rS2, S1mu, S1sd)
    I_area_rS2 = fncdf(meta_c1_rS2, S2mu, S2sd)

    est_t2FAR_rS2 = []
    est_t2HR_rS2 = []

    for i in range(len(t2c1_rS2)):
        t2c1_upper = t2c1_rS2[i]

        I_FAR_area_rS2 = 1 - fncdf(t2c1_upper, S2mu, S2sd)
        C_HR_area_rS2 = 1 - fncdf(t2c1_upper, S1mu, S1sd)

        est_t2FAR_rS2.append(I_FAR_area_rS2 / I_area_rS2)
        est_t2HR_rS2.append(C_HR_area_rS2 / C_area_rS2)

    ## package output

    # type 1 params
    fit = {}
    fit['da'] = SDT_s_convert(d1, s, 'd1', 'da')
    fit['t1ca'] = SDT_s_convert(t1c1, s, 'c1', 'ca')
    fit['s'] = s

    # type 2 fits for rS1
    fit['meta_da_rS1'] = SDT_s_convert(meta_d1_rS1, s, 'd1', 'da')
    fit['t1ca_rS1'] = SDT_s_convert(meta_c1_rS1, s, 'c1', 'ca')
    fit['t2ca_rS1'] = SDT_s_convert(t2c1_rS1, s, 'c1', 'ca')

    fit['M_ratio_rS1'] = fit['meta_da_rS1'] / fit['da']
    fit['M_diff_rS1'] = fit['meta_da_rS1'] - fit['da']

    fit['logL_rS1'] = logL_rS1

    fit['obs_HR2_rS1'] = t2HR_rS1
    fit['est_HR2_rS1'] = est_t2HR_rS1
    fit['obs_FAR2_rS1'] = t2FAR_rS1
    fit['est_FAR2_rS1'] = est_t2FAR_rS1

    # type 2 fits for rS2
    fit['meta_da_rS2'] = SDT_s_convert(meta_d1_rS2, s, 'd1', 'da')
    fit['t1ca_rS2'] = SDT_s_convert(meta_c1_rS2, s, 'c1', 'ca')
    fit['t2ca_rS2'] = SDT_s_convert(t2c1_rS2, s, 'c1', 'ca')

    fit['M_ratio_rS2'] = fit['meta_da_rS2'] / fit['da']
    fit['M_diff_rS2'] = fit['meta_da_rS2'] - fit['da']

    fit['logL_rS2'] = logL_rS2

    fit['obs_HR2_rS2'] = t2HR_rS2
    fit['est_HR2_rS2'] = est_t2HR_rS2
    fit['obs_FAR2_rS2'] = t2FAR_rS2
    fit['est_FAR2_rS2'] = est_t2FAR_rS2

    # S1 units
    fit['S1units'] = {}
    fit['S1units']['d1'] = d1
    fit['S1units']['t1c1'] = t1c1

    fit['S1units']['meta_d1_rS1'] = meta_d1_rS1
    fit['S1units']['t1c1_rS1'] = meta_c1_rS1
    fit['S1units']['t2c1_rS1'] = t2c1_rS1

    fit['S1units']['meta_d1_rS2'] = meta_d1_rS2
    fit['S1units']['t1c1_rS2'] = meta_c1_rS2
    fit['S1units']['t2c1_rS2'] = t2c1_rS2

    return fit
Example #16
 def test_rosenbrock_with_no_exception(self):
     # Define auxiliary problem
     prob = Rosenbrock(n=5)
     # Define iteration points
     x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
               [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
               [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
               [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
               [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
               [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
               [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
               [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
               [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
               [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
               [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
               [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
               [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
               [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
               [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
               [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
               [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
               [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
               [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338],
               [0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691],
               [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041],
               [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744],
               [0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623],
               [0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448],
               [0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437],
               [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581],
               [0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553],
               [0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149],
               [0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663],
               [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288],
               [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356],
               [1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912],
               [0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305],
               [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047],
               [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297],
               [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032],
               [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786],
               [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]]
     # Compute gradients and differences between successive iterates
     grad_list = [prob.grad(x) for x in x_list]
     delta_x = [
         np.array(x_list[i + 1]) - np.array(x_list[i])
         for i in range(len(x_list) - 1)
     ]
     delta_grad = [
         grad_list[i + 1] - grad_list[i] for i in range(len(grad_list) - 1)
     ]
     # Check curvature condition
     for i in range(len(delta_x)):
         s = delta_x[i]
         y = delta_grad[i]
         if np.dot(s, y) <= 0:
             raise ArithmeticError()
     # Define QuasiNewton update
     for quasi_newton in (BFGS(init_scale=1,
                               min_curvature=1e-4), SR1(init_scale=1)):
         hess = deepcopy(quasi_newton)
         inv_hess = deepcopy(quasi_newton)
         hess.initialize(len(x_list[0]), 'hess')
         inv_hess.initialize(len(x_list[0]), 'inv_hess')
         # Compare the hessian and its inverse
         for i in range(len(delta_x)):
             s = delta_x[i]
             y = delta_grad[i]
             hess.update(s, y)
             inv_hess.update(s, y)
             B = hess.get_matrix()
             H = inv_hess.get_matrix()
             assert_array_almost_equal(np.linalg.inv(B), H, decimal=10)
         B_true = prob.hess(x_list[i + 1])
         assert_array_less(norm(B - B_true) / norm(B_true), 0.1)
Example #17
ub_scale[idx] = np.full(len(idx),1.)
ub_sigma[idx] = np.full(len(idx),-3.5)
ub_nsig[idx] = np.full(len(idx),6.9)
ub_slope[idx] = np.full(len(idx),-0.1)
ub_nbkg[idx] = np.full(len(idx),6.9)

lb = np.concatenate((lb_scale,lb_sigma,lb_nsig,lb_slope,lb_nbkg),axis=None)
ub = np.concatenate((ub_scale,ub_sigma,ub_nsig,ub_slope,ub_nbkg),axis=None)

constraints = LinearConstraint(A=np.eye(x.shape[0]), lb=lb, ub=ub, keep_feasible=True)

grad = grad(nllbkg)
hess = hessian(nllbkg)

res = minimize(nllbkg, x, args=(nEtaBins, nPtBins, datasetJ, datasetJgen),
               method='trust-constr', jac=grad, hess=SR1(), constraints=constraints,
               options={'verbose': 3, 'disp': True, 'maxiter': 100000,
                        'gtol': 0., 'xtol': xtol, 'barrier_tol': btol})

print(res)

good_idx = np.where((np.sum(datasetJgen,axis=2)>1000.).flatten())[0]

sep = nEtaBins*nEtaBins*nPtBins*nPtBins
good_idx = np.concatenate((good_idx, good_idx+sep,good_idx+2*sep, good_idx+3*sep, good_idx+4*sep), axis=None)

fitres = res.x[good_idx]

gradient = grad(res.x,nEtaBins,nPtBins,datasetJ,datasetJgen)
gradfinal = gradient[good_idx]

print(gradient, "gradient")
Example #18

# sum of phase fractions and sum of concentrations in each phase is 1
linear_constraint = LinearConstraint(lc, [1] * (number_of_phases + 1),
                                     [1] * (number_of_phases + 1))

# Nonlinear constraint: mass conservation + Gibbs phase rule


nonlinear_constraint = NonlinearConstraint(cons_f,
                                           0,
                                           0,
                                           jac=cons_J_vec,
                                           hess=SR1())


nonlinear_constraint_gibbs = NonlinearConstraint(gibbs,
                                                 1,
                                                 N,
                                                 jac=gibbs_J,
                                                 hess=SR1())

# Constraints for SLSQP


eq_cons = {
    'type': 'eq',
    'fun': cons_f,       # assumed: reuses the mass-conservation constraint above
    'jac': cons_J_vec,
}

Example #19
def obj_function(rho):
    w = forward(rho)
    cost = eval_cost_fem(w, rho)
    return cost


def min_f(x):
    value, grad = jax.value_and_grad(obj_function)(x)
    return onp.array(value), onp.array(grad)


fun_ncl = lambda x: eval_volume_fem(x)  # noqa: E731
volume_constraint = NonlinearConstraint(
    fun_ncl, 0.0, np.inf, jac=lambda x: jax.grad(fun_ncl)(x), hess=SR1()
)

x0 = np.ones(A.dim())
res = minimize(
    min_f,
    x0,
    method="trust-constr",
    jac=True,
    hess=SR1(),
    tol=1e-7,
    constraints=volume_constraint,
    bounds=((0, 1.0),) * A.dim(),
    options={"verbose": 3, "gtol": 1e-7, "maxiter": 20},
)
Example #20
def fit_meta_d_MLE(nR_S1, nR_S2, s = 1, fncdf = norm.cdf, fninv = norm.ppf):

    # check inputs
    if (len(nR_S1) % 2) != 0:
        raise ValueError('input arrays must have an even number of elements')
    if len(nR_S1) != len(nR_S2):
        raise ValueError('input arrays must have the same number of elements')
    if any(np.array(nR_S1) == 0) or any(np.array(nR_S2) == 0):
        print(' ')
        print('WARNING!!')
        print('---------')
        print('Your inputs')
        print(' ')
        print('nR_S1:')
        print(nR_S1)
        print('nR_S2:')
        print(nR_S2)
        print(' ')
        print("contain zeros! This may interfere with proper estimation of meta-d'.")
        print("See 'help fit_meta_d_MLE' for more information.")
        print(' ')
        print(' ')
    
    nRatings = int(len(nR_S1) / 2)  # number of ratings in the experiment
    nCriteria = int(2*nRatings - 1) # number criteria to be fitted
    
    """
    set up constraints for scipy.optimize.minimize()
    """
    # parameters
    # meta-d' - 1
    # t2c     - nCriteria-1
    # constrain type 2 criteria values,
    # such that t2c(i) is always <= t2c(i+1)
    # want t2c(i)   <= t2c(i+1) 
    # -->  t2c(i+1) >= t2c(i) + 1e-5 (i.e. very small deviation from equality) 
    # -->  t2c(i) - t2c(i+1) <= -1e-5 
    A = []
    ub = []
    lb = []
    for ii in range(nCriteria-2):
        tempArow = []
        tempArow.extend(np.zeros(ii+1))
        tempArow.extend([1, -1])
        tempArow.extend(np.zeros((nCriteria-2)-ii-1))
        A.append(tempArow)
        ub.append(-1e-5)
        lb.append(-np.inf)
        
    # lower bounds on parameters
    LB = []
    LB.append(-10.)                              # meta-d'
    LB.extend(-20*np.ones((nCriteria-1)//2))    # criteria lower than t1c
    LB.extend(np.zeros((nCriteria-1)//2))       # criteria higher than t1c
    
    # upper bounds on parameters
    UB = []
    UB.append(10.)                           # meta-d'
    UB.extend(np.zeros((nCriteria-1)//2))      # criteria lower than t1c
    UB.extend(20*np.ones((nCriteria-1)//2))    # criteria higher than t1c
    
    """
    prepare other inputs for scipy.optimize.minimize()
    """
    # select constant criterion type
    constant_criterion = 'meta_d1 * (t1c1 / d1)' # relative criterion
    
    # set up initial guess at parameter values
    ratingHR  = []
    ratingFAR = []
    for c in range(1,int(nRatings*2)):
        ratingHR.append(sum(nR_S2[c:]) / sum(nR_S2))
        ratingFAR.append(sum(nR_S1[c:]) / sum(nR_S1))
    
    # obtain index in the criteria array to mark Type I and Type II criteria
    t1_index = nRatings-1
    t2_index = list(set(list(range(0,2*nRatings-1))) - set([t1_index]))
    
    d1 = (1/s) * fninv( ratingHR[t1_index] ) - fninv( ratingFAR[t1_index] )
    meta_d1 = d1
    
    c1 = (-1/(1+s)) * ( fninv( ratingHR ) + fninv( ratingFAR ) )
    t1c1 = c1[t1_index]
    t2c1 = c1[t2_index]
    
    # initial values for the minimization function
    guess = [meta_d1]
    guess.extend(list(t2c1 - eval(constant_criterion)))
    
    # other inputs for the minimization function
    inputObj = [nR_S1, nR_S2, nRatings, d1, t1c1, s, constant_criterion, fncdf, fninv]        
    bounds = Bounds(LB,UB)
    linear_constraint = LinearConstraint(A,lb,ub)
    
    # minimization of negative log-likelihood
    results = minimize(fit_meta_d_logL, guess, args = (inputObj), method='trust-constr',
                       jac='2-point', hess=SR1(),
                       constraints = [linear_constraint],
                       options = {'verbose': 1}, bounds = bounds)
    
    # quickly process some of the output
    meta_d1 = results.x[0]
    t2c1    = results.x[1:] + eval(constant_criterion)
    logL    = -results.fun
    
    # data is fit, now to package it...
    # find observed t2FAR and t2HR 
    
    # I_nR and C_nR are rating trial counts for incorrect and correct trials
    # element i corresponds to # (in)correct w/ rating i
    I_nR_rS2 = nR_S1[nRatings:]
    I_nR_rS1 = list(np.flip(nR_S2[0:nRatings],axis=0))
    
    C_nR_rS2 = nR_S2[nRatings:]
    C_nR_rS1 = list(np.flip(nR_S1[0:nRatings],axis=0))
    
    obs_FAR2_rS2 = [sum( I_nR_rS2[(i+1):] ) / sum(I_nR_rS2) for i in range(nRatings-1)]
    obs_HR2_rS2 = [sum( C_nR_rS2[(i+1):] ) / sum(C_nR_rS2) for i in range(nRatings-1)]
    obs_FAR2_rS1 = [sum( I_nR_rS1[(i+1):] ) / sum(I_nR_rS1) for i in range(nRatings-1)]
    obs_HR2_rS1 = [sum( C_nR_rS1[(i+1):] ) / sum(C_nR_rS1) for i in range(nRatings-1)]
    
    # find estimated t2FAR and t2HR
    S1mu = -meta_d1/2
    S1sd = 1
    S2mu =  meta_d1/2
    S2sd = S1sd/s
    
    mt1c1 = eval(constant_criterion)
    
    C_area_rS2 = 1-fncdf(mt1c1,S2mu,S2sd)
    I_area_rS2 = 1-fncdf(mt1c1,S1mu,S1sd)
    
    C_area_rS1 = fncdf(mt1c1,S1mu,S1sd)
    I_area_rS1 = fncdf(mt1c1,S2mu,S2sd)
    
    est_FAR2_rS2 = []
    est_HR2_rS2 = []
    
    est_FAR2_rS1 = []
    est_HR2_rS1 = []
    
    
    for i in range(nRatings-1):
        
        t2c1_lower = t2c1[(nRatings-1)-(i+1)]
        t2c1_upper = t2c1[(nRatings-1)+i]
            
        I_FAR_area_rS2 = 1-fncdf(t2c1_upper,S1mu,S1sd)
        C_HR_area_rS2  = 1-fncdf(t2c1_upper,S2mu,S2sd)
    
        I_FAR_area_rS1 = fncdf(t2c1_lower,S2mu,S2sd)
        C_HR_area_rS1  = fncdf(t2c1_lower,S1mu,S1sd)
    
        est_FAR2_rS2.append(I_FAR_area_rS2 / I_area_rS2)
        est_HR2_rS2.append(C_HR_area_rS2 / C_area_rS2)
        
        est_FAR2_rS1.append(I_FAR_area_rS1 / I_area_rS1)
        est_HR2_rS1.append(C_HR_area_rS1 / C_area_rS1)
    
    
    # package output
    fit = {}
    fit['da']       = np.sqrt(2/(1+s**2)) * s * d1
    
    fit['s']        = s
    
    fit['meta_da']  = np.sqrt(2/(1+s**2)) * s * meta_d1
    
    fit['M_diff']   = fit['meta_da'] - fit['da']
    
    fit['M_ratio']  = fit['meta_da'] / fit['da']
    
    mt1c1         = eval(constant_criterion)
    fit['meta_ca']  = ( np.sqrt(2)*s / np.sqrt(1+s**2) ) * mt1c1
    
    t2ca          = ( np.sqrt(2)*s / np.sqrt(1+s**2) ) * np.array(t2c1)
    fit['t2ca_rS1']     = t2ca[0:nRatings-1]
    fit['t2ca_rS2']     = t2ca[(nRatings-1):]
    
    fit['S1units'] = {}
    fit['S1units']['d1']        = d1
    fit['S1units']['meta_d1']   = meta_d1
    fit['S1units']['s']         = s
    fit['S1units']['meta_c1']   = mt1c1
    fit['S1units']['t2c1_rS1']  = t2c1[0:nRatings-1]
    fit['S1units']['t2c1_rS2']  = t2c1[(nRatings-1):]
    
    fit['logL']    = logL
    
    fit['est_HR2_rS1']  = est_HR2_rS1
    fit['obs_HR2_rS1']  = obs_HR2_rS1
    
    fit['est_FAR2_rS1'] = est_FAR2_rS1
    fit['obs_FAR2_rS1'] = obs_FAR2_rS1
    
    fit['est_HR2_rS2']  = est_HR2_rS2
    fit['obs_HR2_rS2']  = obs_HR2_rS2
    
    fit['est_FAR2_rS2'] = est_FAR2_rS2
    fit['obs_FAR2_rS2'] = obs_FAR2_rS2

    return fit
Example #21
def metad(
    data=None,
    nRatings=4,
    stimuli="Stimuli",
    accuracy="Accuracy",
    confidence="Confidence",
    padAmount=None,
    nR_S1=None,
    nR_S2=None,
    s=1,
    padding=True,
    collapse=None,
    fncdf=norm.cdf,
    fninv=norm.ppf,
    verbose=1,
    output_df=False,
):
    """Estimate meta-d' using maximum likelihood estimation (MLE).

    This function is adapted from the transcription of fit_meta_d_MLE.m
    (Maniscalco & Lau, 2012) by Alan Lee:
    http://www.columbia.edu/~bsm2105/type2sdt/.

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame` or None
        Dataframe. Note that this function can also directly be used as a
        Pandas method, in which case this argument is no longer needed.
    nRatings : int
        Number of discrete ratings. If a continuous rating scale was used, and
        the number of unique ratings does not match `nRatings`, will convert to
        discrete ratings using :py:func:`metadPy.utils.discreteRatings`.
        Default is set to 4.
    stimuli : string or None
        Name of the column containing the stimuli.
    accuracy : string or None
        Name of the columns containing the accuracy.
    confidence : string or None
        Name of the column containing the confidence ratings.
    nR_S1, nR_S2 : list, 1d array-like or None
        These are vectors containing the total number of responses in
        each response category, conditional on presentation of S1 and S2. If
        nR_S1 = [100, 50, 20, 10, 5, 1], then when stimulus S1 was presented, the
        subject had the following response counts:
            * responded `'S1'`, rating=`3` : 100 times
            * responded `'S1'`, rating=`2` : 50 times
            * responded `'S1'`, rating=`1` : 20 times
            * responded `'S2'`, rating=`1` : 10 times
            * responded `'S2'`, rating=`2` : 5 times
            * responded `'S2'`, rating=`3` : 1 time

        The ordering of response / rating counts for S2 should be the same as
        it is for S1. e.g. if nR_S2 = [3, 7, 8, 12, 27, 89], then when stimulus S2
        was presented, the subject had the following response counts:
            * responded `'S1'`, rating=`3` : 3 times
            * responded `'S1'`, rating=`2` : 7 times
            * responded `'S1'`, rating=`1` : 8 times
            * responded `'S2'`, rating=`1` : 12 times
            * responded `'S2'`, rating=`2` : 27 times
            * responded `'S2'`, rating=`3` : 89 times
    s : int
        Ratio of standard deviations for type 1 distributions as:
        `s = np.std(S1) / np.std(S2)`. If not specified, s is set to a default
        value of 1. For most purposes, it is recommended to set `s=1`. See
        http://www.columbia.edu/~bsm2105/type2sdt for further discussion.
    padding : boolean
        If `True`, a small value will be added to the counts to avoid problems
        during fit.
    padAmount : float or None
        The value to add to each response count if padding is True.
        Default value is 1/(2*nRatings)
    collapse : int or None
        If an integer `N` is provided, will collapse ratings to avoid zeros by
        summing every `N` consecutive ratings. Default set to `None`.
    fncdf : func
        A function handle for the CDF of the type 1 distribution. If not
        specified, fncdf defaults to :py:func:`scipy.stats.norm.cdf()`.
    fninv : func
        A function handle for the inverse CDF of the type 1 distribution. If
        not specified, fninv defaults to :py:func:`scipy.stats.norm.ppf()`.
    verbose : {0, 1, 2, 3}
        Level of algorithm’s verbosity:
            * 0 (default) : work silently.
            * 1 : display a termination report.
            * 2 : display progress during iterations.
            * 3 : display progress during iterations (more complete report).
    output_df : bool
        If `True`, return a :py:class:`pandas.DataFrame`, otherwise will
        return a dictionary.

    Returns
    -------
    fit : dict or :py:class:`pandas.DataFrame`
        In the following, S1 and S2 represent the distributions of evidence
        generated by stimulus classes S1 and S2:

            * `'da'` : `mean(S2) - mean(S1)`, in
                root-mean-square(sd(S1), sd(S2)) units
            * `'s'` : `sd(S1) / sd(S2)`
            * `'meta_da'` : meta-d' in RMS units
            * `'M_diff'` : `meta_da - da`
            * `'M_ratio'` : `meta_da / da`
            * `'meta_ca'` : type 1 criterion for meta-d' fit, RMS units.
            * `'t2ca_rS1'` : type 2 criteria of "S1" responses for meta-d' fit,
                RMS units.
            * `'t2ca_rS2'` : type 2 criteria of "S2" responses for meta-d' fit,
                RMS units.
            * `'logL'` : log likelihood of the data fit
            * `'est_HR2_rS1'` : estimated (from meta-d' fit) type 2 hit rates
                for S1 responses.
            * `'obs_HR2_rS1'` : actual type 2 hit rates for S1 responses.
            * `'est_FAR2_rS1'` : estimated type 2 false alarm rates for S1
                responses.
            * `'obs_FAR2_rS1'` : actual type 2 false alarm rates for S1
                responses.
            * `'est_HR2_rS2'` : estimated type 2 hit rates for S2 responses.
            * `'obs_HR2_rS2'` : actual type 2 hit rates for S2 responses.
            * `'est_FAR2_rS2'` : estimated type 2 false alarm rates for S2
                responses.
            * `'obs_FAR2_rS2'` : actual type 2 false alarm rates for S2
                responses.

    Notes
    -----
    Given data from an experiment where an observer discriminates between two
    stimulus alternatives on every trial and provides confidence ratings, this
    function provides a type 2 SDT analysis of the data.

    .. warning:: If nR_S1 or nR_S2 contain zeros, this may interfere with
        estimation of meta-d'. Some options for dealing with response cell
        counts containing zeros are:

        * Add a small adjustment factor (e.g. `1/len(nR_S1)`) to each input
            vector. This is a generalization of the correction for similar
            estimation issues of type 1 d' as recommended in [1]_. When using
            this correction method, it is recommended to add the adjustment
            factor to ALL data for all subjects, even for those subjects whose
            data is not in need of such correction, in order to avoid biases in
            the analysis (cf [2]_). Use `padding==True` to activate this
            correction.

        * Collapse across rating categories. e.g. if your data set has 4
            possible confidence ratings such that `len(nR_S1)==8`, defining new
            input vectors:

            >>> nR_S1 = nR_S1.reshape(int(len(nR_S1)/collapse), 2).sum(axis=1)

            This might be sufficient to eliminate zeros from the input without
            using an adjustment. Use e.g. `collapse=2` to activate this
            correction.

    If there are N ratings, then there will be N-1 type 2 hit rates and false
    alarm rates.

    Examples
    --------
    No correction
    >>> nR_S1 = [36, 24, 17, 20, 10, 12, 9, 2]
    >>> nR_S2 = [1, 4, 10, 11, 19, 18, 28, 39]
    >>> fit = metad(nR_S1, nR_S2, padding=False)

    Correction by padding values
    >>> nR_S1 = [36, 24, 17, 20, 10, 12, 9, 2]
    >>> nR_S2 = [1, 4, 10, 11, 19, 18, 28, 39]
    >>> fit = metad(nR_S1, nR_S2, padding=True)

    Correction by collapsing values
    >>> nR_S1 = [36, 24, 17, 20, 10, 12, 9, 2]
    >>> nR_S2 = [1, 4, 10, 11, 19, 18, 28, 39]
    >>> fit = fit_meta_d_MLE(nR_S1, nR_S2, collapse=2)
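
    Returning the fit as a :py:class:`pandas.DataFrame` instead of a
    dictionary (see the `output_df` parameter above)

    >>> fit = fit_meta_d_MLE(nR_S1, nR_S2, padding=True, output_df=True)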

    References
    ----------
    .. [1] Hautus, M. J. (1995). Corrections for extreme proportions and their
       biasing effects on estimated values of d'. Behavior Research Methods,
       Instruments, & Computers, 27, 46-51.

    .. [2] Snodgrass, J. G., & Corwin, J. (1988). Pragmatics of measuring
       recognition memory: Applications to dementia and amnesia. Journal of
       Experimental Psychology: General, 117(1), 34–50.
       https://doi.org/10.1037/0096-3445.117.1.34
    """
    if isinstance(data, pd.DataFrame):
        if padAmount is None:
            padAmount = 1 / (2 * nRatings)
        nR_S1, nR_S2 = trials2counts(
            data=data,
            stimuli=stimuli,
            accuracy=accuracy,
            confidence=confidence,
            nRatings=nRatings,
            padding=padding,
            padAmount=padAmount,
        )
    if isinstance(nR_S1, list):
        nR_S1 = np.array(nR_S1)
    if isinstance(nR_S2, list):
        nR_S2 = np.array(nR_S2)
    if (len(nR_S1) % 2) != 0:
        raise ValueError("input arrays must have an even number of elements")
    if len(nR_S1) != len(nR_S2):
        raise ValueError("input arrays must have the same number of elements")
    if (padding is False) and (collapse is None):
        if any(np.array(nR_S1) == 0) or any(np.array(nR_S2) == 0):
            import warnings

            warnings.warn(
                "Your inputs contain zeros and are not corrected. "
                "This may interfere with proper estimation of meta-d'. "
                "See the docstring for more information."
            )
    elif (padding is True) and (collapse is None):
        # A small padding is required to avoid problems in model fit if any
        # confidence ratings aren't used (see Hautus, 1995 for details)
        if padAmount is None:
            padAmount = 1 / len(nR_S1)
        nR_S1 = nR_S1 + padAmount
        nR_S2 = nR_S2 + padAmount
    elif (padding is False) and (collapse is not None):
        # Collapse values across ratings to avoid problems in model fit,
        # summing groups of `collapse` adjacent response counts
        nR_S1 = nR_S1.reshape(int(len(nR_S1) / collapse), collapse).sum(axis=1)
        nR_S2 = nR_S2.reshape(int(len(nR_S2) / collapse), collapse).sum(axis=1)
    elif (padding is True) and (collapse is not None):
        raise ValueError("Padding and collapse cannot both be used.")

    nRatings = int(len(nR_S1) / 2)  # number of ratings in the experiment
    nCriteria = int(2 * nRatings - 1)  # number criteria to be fitted

    # parameters
    # meta-d' - 1
    # t2c - nCriteria-1
    # constrain type 2 criteria values, such that t2c(i) is always <= t2c(i+1)
    # -->  t2c(i+1) >= t2c(i) + 1e-5 (i.e. very small deviation from equality)
    A, ub, lb = [], [], []
    for ii in range(nCriteria - 2):
        tempArow = []
        tempArow.extend(np.zeros(ii + 1))
        tempArow.extend([1, -1])
        tempArow.extend(np.zeros((nCriteria - 2) - ii - 1))
        A.append(tempArow)
        ub.append(-1e-5)
        lb.append(-np.inf)
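    # For example, with nRatings=3 (nCriteria=5) the parameter vector is
    # [meta_d', t2c_1, t2c_2, t2c_3, t2c_4] and A reads:
    #     [[0,  1, -1,  0,  0],
    #      [0,  0,  1, -1,  0],
    #      [0,  0,  0,  1, -1]]
    # so that A @ x <= -1e-5 enforces t2c(i) + 1e-5 <= t2c(i+1).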

    # lower bounds on parameters
    LB = []
    LB.append(-10.0)  # meta-d'
    LB.extend(-20 * np.ones((nCriteria - 1) // 2))  # criteria lower than t1c
    LB.extend(np.zeros((nCriteria - 1) // 2))  # criteria higher than t1c

    # upper bounds on parameters
    UB = []
    UB.append(10.0)  # meta-d'
    UB.extend(np.zeros((nCriteria - 1) // 2))  # criteria lower than t1c
    UB.extend(20 * np.ones((nCriteria - 1) // 2))  # criteria higher than t1c

    # select constant criterion type
    constant_criterion = "meta_d1 * (t1c1 / d1)"  # relative criterion
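    # This string is evaluated later via eval() against the local meta_d1,
    # t1c1 and d1, so the type 1 criterion of the meta-d' model keeps the same
    # relative location as the empirical type 1 criterion: c / d' is held
    # constant between the type 1 and type 2 fits.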

    # set up initial guess at parameter values
    ratingHR = []
    ratingFAR = []
    for c in range(1, int(nRatings * 2)):
        ratingHR.append(sum(nR_S2[c:]) / sum(nR_S2))
        ratingFAR.append(sum(nR_S1[c:]) / sum(nR_S1))

    # obtain index in the criteria array to mark Type I and Type II criteria
    t1_index = nRatings - 1
    t2_index = sorted(set(range(0, 2 * nRatings - 1)) - {t1_index})

    d1 = (1 / s) * fninv(ratingHR[t1_index]) - fninv(ratingFAR[t1_index])
    meta_d1 = d1

    c1 = (-1 / (1 + s)) * (fninv(ratingHR) + fninv(ratingFAR))
    t1c1 = c1[t1_index]
    t2c1 = c1[t2_index]

    # initial values for the minimization function
    guess = [meta_d1]
    guess.extend(list(t2c1 - eval(constant_criterion)))

    # other inputs for the minimization function
    inputObj = [nR_S1, nR_S2, nRatings, d1, t1c1, s, constant_criterion, fncdf, fninv]
    bounds = Bounds(LB, UB)
    linear_constraint = LinearConstraint(A, lb, ub)

    # minimization of negative log-likelihood
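    # trust-constr with a finite-difference ("2-point") gradient and an SR1
    # quasi-Newton Hessian approximation, so no analytical derivatives of the
    # likelihood are required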
    results = minimize(
        fit_meta_d_logL,
        guess,
        args=(inputObj,),  # pass inputObj as a single extra argument
        method="trust-constr",
        jac="2-point",
        hess=SR1(),
        constraints=[linear_constraint],
        options={"verbose": verbose},
        bounds=bounds,
    )

    # quickly process some of the output
    meta_d1 = results.x[0]
    t2c1 = results.x[1:] + eval(constant_criterion)
    logL = -results.fun

    # I_nR and C_nR are rating trial counts for incorrect and correct trials
    # element i corresponds to # (in)correct w/ rating i
    I_nR_rS2 = nR_S1[nRatings:]
    I_nR_rS1 = list(np.flip(nR_S2[0:nRatings], axis=0))

    C_nR_rS2 = nR_S2[nRatings:]
    C_nR_rS1 = list(np.flip(nR_S1[0:nRatings], axis=0))

    obs_FAR2_rS2 = [
        sum(I_nR_rS2[(i + 1) :]) / sum(I_nR_rS2) for i in range(nRatings - 1)
    ]
    obs_HR2_rS2 = [
        sum(C_nR_rS2[(i + 1) :]) / sum(C_nR_rS2) for i in range(nRatings - 1)
    ]
    obs_FAR2_rS1 = [
        sum(I_nR_rS1[(i + 1) :]) / sum(I_nR_rS1) for i in range(nRatings - 1)
    ]
    obs_HR2_rS1 = [
        sum(C_nR_rS1[(i + 1) :]) / sum(C_nR_rS1) for i in range(nRatings - 1)
    ]

    # find estimated t2FAR and t2HR
    S1mu = -meta_d1 / 2
    S1sd = 1
    S2mu = meta_d1 / 2
    S2sd = S1sd / s
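    # This defines the meta-level SDT model: Gaussian evidence distributions
    # centred at -meta_d1/2 and +meta_d1/2, with sd(S2) = sd(S1) / s.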

    mt1c1 = eval(constant_criterion)

    C_area_rS2 = 1 - fncdf(mt1c1, S2mu, S2sd)
    I_area_rS2 = 1 - fncdf(mt1c1, S1mu, S1sd)

    C_area_rS1 = fncdf(mt1c1, S1mu, S1sd)
    I_area_rS1 = fncdf(mt1c1, S2mu, S2sd)

    est_FAR2_rS2, est_HR2_rS2 = [], []
    est_FAR2_rS1, est_HR2_rS1 = [], []

    for i in range(nRatings - 1):
        t2c1_lower = t2c1[(nRatings - 1) - (i + 1)]
        t2c1_upper = t2c1[(nRatings - 1) + i]

        I_FAR_area_rS2 = 1 - fncdf(t2c1_upper, S1mu, S1sd)
        C_HR_area_rS2 = 1 - fncdf(t2c1_upper, S2mu, S2sd)

        I_FAR_area_rS1 = fncdf(t2c1_lower, S2mu, S2sd)
        C_HR_area_rS1 = fncdf(t2c1_lower, S1mu, S1sd)

        est_FAR2_rS2.append(I_FAR_area_rS2 / I_area_rS2)
        est_HR2_rS2.append(C_HR_area_rS2 / C_area_rS2)

        est_FAR2_rS1.append(I_FAR_area_rS1 / I_area_rS1)
        est_HR2_rS1.append(C_HR_area_rS1 / C_area_rS1)

    # package output
    fit = {}
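    # sqrt(2 / (1 + s**2)) * s converts d' (and meta-d') into RMS units,
    # matching the `da` and `meta_da` definitions in the docstring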
    fit["da"] = np.sqrt(2 / (1 + s ** 2)) * s * d1
    fit["s"] = s
    fit["meta_da"] = np.sqrt(2 / (1 + s ** 2)) * s * meta_d1
    fit["M_diff"] = fit["meta_da"] - fit["da"]
    fit["M_ratio"] = fit["meta_da"] / fit["da"]

    # mt1c1 was already computed above from the fitted meta_d1
    fit["meta_ca"] = (np.sqrt(2) * s / np.sqrt(1 + s ** 2)) * mt1c1

    t2ca = (np.sqrt(2) * s / np.sqrt(1 + s ** 2)) * np.array(t2c1)
    fit["t2ca_rS1"] = t2ca[0 : nRatings - 1]
    fit["t2ca_rS2"] = t2ca[(nRatings - 1) :]

    fit["d1"] = d1
    fit["meta_d1"] = meta_d1
    fit["s"] = s
    fit["meta_c1"] = mt1c1
    fit["t2c1_rS1"] = t2c1[0 : nRatings - 1]
    fit["t2c1_rS2"] = t2c1[(nRatings - 1) :]
    fit["logL"] = logL

    fit["est_HR2_rS1"] = est_HR2_rS1
    fit["obs_HR2_rS1"] = obs_HR2_rS1

    fit["est_FAR2_rS1"] = est_FAR2_rS1
    fit["obs_FAR2_rS1"] = obs_FAR2_rS1

    fit["est_HR2_rS2"] = est_HR2_rS2
    fit["obs_HR2_rS2"] = obs_HR2_rS2

    fit["est_FAR2_rS2"] = est_FAR2_rS2
    fit["obs_FAR2_rS2"] = obs_FAR2_rS2

    if output_df is True:
        return pd.DataFrame(fit)
    return fit
Exemple #22
def mdf_process(aircraft, search_domain, criterion):
    """
    Compute criterion and constraints
    """

    if aircraft.propulsion.architecture in (1, 2, 3):
        start_value = (aircraft.turbofan_engine.reference_thrust,
                       aircraft.wing.area)
    elif aircraft.propulsion.architecture == 4:
        start_value = (aircraft.turboprop_engine.reference_thrust,
                       aircraft.wing.area)
    else:
        raise Exception("propulsion.architecture index is out of range")

    if (criterion == "MTOW"):
        crit_index = 0
    elif (criterion == "block_fuel"):
        crit_index = 1
    elif (criterion == "CO2_metric"):
        crit_index = 2
    elif (criterion == "COC"):
        crit_index = 3
    elif (criterion == "DOC"):
        crit_index = 4
    else:
        raise Exception("Criterion name is unknown")

    eval_mda0(aircraft)  # Initialization (compulsory only with mda3)

    crit_ref, cst_ref = eval_optim_data(start_value, aircraft, crit_index, 1.)
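    # crit_ref and cst_ref serve as reference values; crit_ref is used below
    # to normalise the criterion and the constraint evaluations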

    res = minimize(
        eval_optim_crt,
        start_value,
        args=(
            aircraft,
            crit_index,
            crit_ref,
        ),
        method="trust-constr",
        jac="3-point",
        hess=SR1(),
        hessp=None,
        bounds=search_domain,
        tol=1e-5,
        constraints=NonlinearConstraint(
            fun=lambda x: eval_optim_cst(x, aircraft, crit_index, crit_ref),
            lb=0.,
            ub=np.inf,
            jac='3-point'),
        options={
            'maxiter': 500,
            'gtol': 1e-13
        })
    #              tol=None, callback=None,
    #              options={'grad': None, 'xtol': 1e-08, 'gtol': 1e-08, 'barrier_tol': 1e-08,
    #                       'sparse_jacobian': None, 'maxiter': 1000, 'verbose': 0,
    #                       'finite_diff_rel_step': None, 'initial_constr_penalty': 1.0,
    #                       'initial_tr_radius': 1.0, 'initial_barrier_parameter': 0.1,
    #                       'initial_barrier_tolerance': 0.1, 'factorization_method': None, 'disp': False})

    # res = minimize(eval_optim_crt, start_value, args=(aircraft,crit_index,crit_ref,), method="SLSQP", bounds=search_domain,
    #                constraints={"type":"ineq","fun":eval_optim_cst,"args":(aircraft,crit_index,crit_ref,)},
    #                jac="2-point",options={"maxiter":30,"ftol":0.0001,"eps":0.01},tol=1e-14)

    #res = minimize(eval_optim_crt, x_in, args=(aircraft,crit_index,crit_ref,), method="COBYLA", bounds=((110000,140000),(120,160)),
    #               constraints={"type":"ineq","fun":eval_optim_cst,"args":(aircraft,crit_index,crit_ref,)},
    #               options={"maxiter":100,"tol":0.1,"catol":0.0002,'rhobeg': 1.0})
    print(res)

    return res
Exemple #23
def simple_constrained_minimization_example():
    # Consider a minimization problem with several constraints.
    fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
    cons = ({
        'type': 'ineq',
        'fun': lambda x: x[0] - 2 * x[1] + 2
    }, {
        'type': 'ineq',
        'fun': lambda x: -x[0] - 2 * x[1] + 6
    }, {
        'type': 'ineq',
        'fun': lambda x: -x[0] + 2 * x[1] + 2
    })
    bnds = ((0, None), (0, None))
    res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds, constraints=cons)
    print('Solution =', res.x)

    # Trust-region constrained algorithm.
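    # rosen, rosen_der, rosen_hess, rosen_hess_linop and rosen_hess_p, as well
    # as cons_f, cons_J and cons_H, are assumed to be defined elsewhere in
    # this module; the SciPy trust-constr tutorial uses, e.g.:
    #
    #     from scipy.optimize import rosen, rosen_der, rosen_hess
    #
    #     def cons_f(x):
    #         return [x[0]**2 + x[1], x[0]**2 - x[1]]
    #
    #     def cons_J(x):
    #         return [[2*x[0], 1], [2*x[0], -1]]
    #
    #     def cons_H(x, v):
    #         return v[0]*np.array([[2, 0], [0, 0]]) + \
    #                v[1]*np.array([[2, 0], [0, 0]])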
    bounds = Bounds([0, -0.5], [1.0, 2.0])
    linear_constraint = LinearConstraint([[1, 2], [2, 1]], [-np.inf, 1],
                                         [1, 1])
    nonlinear_constraint = NonlinearConstraint(cons_f,
                                               -np.inf,
                                               1,
                                               jac=cons_J,
                                               hess=cons_H)
    #nonlinear_constraint = NonlinearConstraint(cons_f, -np.inf, 1, jac=cons_J, hess=cons_H_sparse)
    #nonlinear_constraint = NonlinearConstraint(cons_f, -np.inf, 1, jac=cons_J, hess=cons_H_linear_operator)
    #nonlinear_constraint = NonlinearConstraint(cons_f, -np.inf, 1, jac=cons_J, hess=BFGS())
    #nonlinear_constraint = NonlinearConstraint(cons_f, -np.inf, 1, jac=cons_J, hess='2-point')
    #nonlinear_constraint = NonlinearConstraint(cons_f, -np.inf, 1, jac='2-point', hess=BFGS())

    x0 = np.array([0.5, 0])
    res = minimize(rosen,
                   x0,
                   method='trust-constr',
                   jac=rosen_der,
                   hess=rosen_hess,
                   constraints=[linear_constraint, nonlinear_constraint],
                   options={'verbose': 1},
                   bounds=bounds)
    print('Solution =', res.x)
    res = minimize(rosen,
                   x0,
                   method='trust-constr',
                   jac=rosen_der,
                   hess=rosen_hess_linop,
                   constraints=[linear_constraint, nonlinear_constraint],
                   options={'verbose': 1},
                   bounds=bounds)
    print('Solution =', res.x)
    res = minimize(rosen,
                   x0,
                   method='trust-constr',
                   jac=rosen_der,
                   hessp=rosen_hess_p,
                   constraints=[linear_constraint, nonlinear_constraint],
                   options={'verbose': 1},
                   bounds=bounds)
    print('Solution =', res.x)
    res = minimize(rosen,
                   x0,
                   method='trust-constr',
                   jac='2-point',
                   hess=SR1(),
                   constraints=[linear_constraint, nonlinear_constraint],
                   options={'verbose': 1},
                   bounds=bounds)
    print('Solution =', res.x)