Example #1
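# adjust the forward sensitivity analysis (FSA) tolerances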
solver.setRelativeToleranceFSA(rtol=1e-10)
solver.setAbsoluteToleranceFSA(atol=1e-10)

# CREATE LOGICLE OBJECTIVE FUNCTION __________________________________________________________________________________

obj_lin = importer.create_objective(solver=solver)

# logicle parameter
T = 1
end_lin = 1e-5
logicle_obj = LogicleScale.LogicleObject(T=T, end_lin=end_lin)

f = lambda x: obj_lin.get_fval(
    logicleInverseTransform(par=x, logicle_object=logicle_obj))
g = lambda x: (obj_lin.get_grad(logicleInverseTransform(x, logicle_obj))
               * logicleInverseGradient(x, logicle_obj))

obj = pypesto.Objective(fun=f, grad=g)  #, hess=h)
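# Note on the gradient above: with phi^-1 denoting the logicle inverse
# transform, the reparameterised objective is F(x) = f(phi^-1(x)), so by the
# chain rule grad F(x) = grad f(phi^-1(x)) * (phi^-1)'(x). The elementwise
# product with logicleInverseGradient(x) assumes the transform acts
# componentwise on the parameter vector, as the use of '*' suggests.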

print('optimal x =', petab_problem.x_nominal)
print('optimal likelihood value =', obj(petab_problem.x_nominal))

# check gradient at optimum and at random point
check_grad_1 = obj.check_grad(petab_problem.x_nominal)
print(check_grad_1[['grad', 'fd_c', 'abs_err', 'rel_err']])

x_random = np.random.normal(0.5, 0.005, 22)
check_grad_2 = obj.check_grad(x_random)
print(check_grad_2[['grad', 'fd_c', 'abs_err', 'rel_err']])

# OPTIMIZATION WITHOUT PRIOR ___________________________________________________________________________________________
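A minimal sketch of the optimization step announced by the header above, assuming pyPESTO's ScipyOptimizer: the bounds (here on the logicle scale, for the 22 parameters used in x_random above), the optimizer method, and the number of starts are illustrative assumptions, and the optimize submodule layout may differ between pyPESTO versions.

# multi-start local optimization of the logicle-scale objective (sketch)
import pypesto.optimize as optimize

problem = pypesto.Problem(objective=obj,
                          lb=np.zeros(22),  # assumed lower bounds (logicle scale)
                          ub=np.ones(22))   # assumed upper bounds (logicle scale)

optimizer = optimize.ScipyOptimizer(method='L-BFGS-B')
result = optimize.minimize(problem=problem, optimizer=optimizer, n_starts=20)

# summary of all starts (final objective values and parameter vectors)
print(result.optimize_result.as_dataframe())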
Example #2
# adjust the forward sensitivity analysis (FSA) tolerances
solver.setRelativeToleranceFSA(rtol=1e-10)
solver.setAbsoluteToleranceFSA(atol=1e-10)

# CREATE LOGICLE OBJECTIVE FUNCTION __________________________________________________________________________________

obj_lin = importer.create_objective(solver=solver)

# logicle parameter
T = 10
end_lin = 1e-5
logicle_obj = LogicleScale.LogicleObject(T=T, end_lin=end_lin)

f = lambda x: obj_lin.get_fval(logicleInverseTransform(par=x, logicle_object=logicle_obj))
g = lambda x: obj_lin.get_grad(logicleInverseTransform(x, logicle_obj)) * logicleInverseGradient(x, logicle_obj)

obj = pypesto.Objective(fun=f, grad=g) #, hess=h)

print('optimal x =', petab_problem.x_nominal)
print('optimal likelihood value =', obj(petab_problem.x_nominal))


# check gradient at optimum and at random point
check_grad_1 = obj.check_grad(petab_problem.x_nominal)
print(check_grad_1[['grad', 'fd_c', 'abs_err', 'rel_err']])

x_random = np.random.normal(0.5, 0.005, 12)
check_grad_2 = obj.check_grad(x_random)
print(check_grad_2[['grad', 'fd_c', 'abs_err', 'rel_err']])