Example #1
import numpy as np

import amici
import pypesto
import LogicleScale
from LogicleScale import logicleInverseTransform, logicleInverseGradient

# `petab_problem`, `importer`, `model` and `solver` are assumed to have been
# set up beforehand, e.g. via pyPESTO's PEtab importer.
solver.setSensitivityMethod(
    amici.SensitivityMethod_forward)  # use forward sensitivity analysis
model.requireSensitivitiesForAllParameters()  # sensitivities w.r.t. all parameters

# set tight tolerances for the forward sensitivity analysis
solver.setRelativeToleranceFSA(rtol=1e-10)
solver.setAbsoluteToleranceFSA(atol=1e-10)

# CREATE LOGICLE OBJECTIVE FUNCTION __________________________________________________________________________________

obj_lin = importer.create_objective(solver=solver)

# logicle transformation parameters
T = 1
end_lin = 1e-5
logicle_obj = LogicleScale.LogicleObject(T=T, end_lin=end_lin)

# objective in logicle-transformed parameter space: evaluate the linear-scale
# objective at the back-transformed point ...
f = lambda x: obj_lin.get_fval(
    logicleInverseTransform(par=x, logicle_object=logicle_obj))
# ... and obtain its gradient via the chain rule, using the gradient of the
# inverse transform
g = lambda x: (obj_lin.get_grad(
    logicleInverseTransform(par=x, logicle_object=logicle_obj))
    * logicleInverseGradient(x, logicle_obj))

obj = pypesto.Objective(fun=f, grad=g)

print('optimal x =', petab_problem.x_nominal)
print('optimal likelihood value =', obj(petab_problem.x_nominal))

# check the analytic gradient against finite differences at the nominal point
check_grad_1 = obj.check_grad(petab_problem.x_nominal)
print(check_grad_1[['grad', 'fd_c', 'abs_err', 'rel_err']])
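With the transformed objective in place, it could be plugged into a pyPESTO
problem for multi-start optimization. The following is a minimal sketch that
is not part of the original example; the box constraints, the number of
starts, and the `pypesto.optimize` module layout are assumptions.

import pypesto.optimize as optimize

# hypothetical box constraints in logicle-transformed space
dim = len(petab_problem.x_nominal)
lb = np.zeros(dim)
ub = np.ones(dim)

problem = pypesto.Problem(objective=obj, lb=lb, ub=ub)
result = optimize.minimize(problem=problem, n_starts=10)
print(result.optimize_result)
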
Example #2
import LogicleScale
from LogicleScale import logicleTransform
import numpy as np

# a LogicleObject can also be constructed directly:
# logicle_object = LogicleScale.LogicleObject(T=3, end_lin=1e-5)

# parameter values to transform
par = np.linspace(0, 100, 100)

# transform the parameters; the function returns the transformed values and
# the LogicleObject holding the transformation parameters
res = logicleTransform(par, T=100.0, end_lin=1e-5)
transformed, logicle_object = res[0], res[1]

# map the transformed values back to the original scale
par_back = LogicleScale.logicleInverseTransform(transformed, logicle_object)

# gradient of the logicle transform
grad = LogicleScale.logicleGradient(transformed, logicle_object)

# gradient of the inverse transform, evaluated at 1
inv_grad = LogicleScale.logicleInverseGradient(1, logicle_object)
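As a quick sanity check (an addition, not part of the original snippet), the
inverse transform should recover the original parameters up to numerical
precision:

# round trip: inverse(transform(par)) should reproduce par
print('round trip ok:', np.allclose(par_back, par))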