Example #1
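# Assumed context (not shown in this snippet): numpy as np, pypesto, the
# LogicleScale module (providing LogicleObject, logicleInverseTransform and
# logicleInverseGradient) and an AMICI/PEtab setup that creates `importer`,
# `solver` and `petab_problem` are set up beforehand.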
# play with FSA tolerances
solver.setRelativeToleranceFSA(rtol=1e-10)
solver.setAbsoluteToleranceFSA(atol=1e-10)

# CREATE LOGICLE OBJECTIVE FUNCTION __________________________________________________________________________________

obj_lin = importer.create_objective(solver=solver)

# logicle parameter
T = 1
end_lin = 1e-5
logicle_obj = LogicleScale.LogicleObject(T=T, end_lin=end_lin)

f = lambda x: obj_lin.get_fval(
    logicleInverseTransform(par=x, logicle_object=logicle_obj))
g = lambda x: obj_lin.get_grad(logicleInverseTransform(x, logicle_obj)
                               ) * logicleInverseGradient(x, logicle_obj)

obj = pypesto.Objective(fun=f, grad=g)  #, hess=h)
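# Note on f and g above: the optimizer works in logicle-transformed
# coordinates xi. f maps xi back to linear parameters theta =
# logicleInverse(xi) before evaluating the AMICI objective, and g applies the
# chain rule d f(theta(xi)) / d xi = (d f / d theta) * (d theta / d xi), where
# the second factor comes from logicleInverseGradient.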

print('optimal x = ', petab_problem.x_nominal)
print('optimal lh value', obj(petab_problem.x_nominal))

# check gradient at optimum and at random point
check_grad_1 = obj.check_grad(petab_problem.x_nominal)
print(check_grad_1[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])

x_random = np.random.normal(0.5, 0.005, 22)
check_grad_2 = obj.check_grad(x_random)
print(check_grad_2[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])
# reference parameter values from the paper (linear scale)
best_par_paper = [
    0.59, 0.025, 0.009, 21.5e-6, 3.6e-8, 7.5e-6, 0.75, 5.5e-2, 1.8e-7, 1.8e-5,
    0.64, 0.15, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1, 1, 1, 1
]

# optimal log10 parameter, back-transformed to the linear scale
best_par_log = 10**np.array([
    -0.07426949, -1.5424396, -1.71845154, -4.79418012, -10., -5.23332452,
    -0.12601026, -4.99988123, -9.99999998, -4.55638098, -0.30898867,
    -0.90503462, -10., -6.96050283, -8.54550995, -10., -0.80136887, -10.,
    -8.31990936, -4.27926881, -4.63058586, 0, 0, 0, 0
])

# optimal logicle parameter
logicle_obj = LogicleObject(T=100, end_lin=1e-5)
best_par_logicle = logicleInverseTransform([
    0.73294927, 0.54583766, 0.52801805, 0.13235649, 0., 0.03449152, 0.71937865,
    0.00973187, 0.09015665, 0.05718341, 0.68776811, 0.62018734, 0.27800834,
    0.08857416, 0.27061034, 0.24731772, 0.63725277, 0., 0.0983246, 0.00224438,
    0.054455, 1, 1, 1, 1
], logicle_obj)

logicle_obj = LogicleObject(T=100, end_lin=1e-5)
best_par_red = logicleInverseTransform([
    0.71403861, 0.53124423, 0.49639239, 0.07264355, 0, 0.04018985, 0.7201659,
    0, 0.0019951, 0.12294839, 0.7012356, 0.61790586, 0, 0, 0, 0, 0.56526125, 0,
    0, 0, 0, 1, 1, 1, 1
], logicle_obj)

# log10-scale values for best_par_red; note that this assignment overwrites
# the logicle-based best_par_red defined just above
best_par_red = 10**np.array([
    -0.11508453, -1.59008064, -1.71610071, -4.63725562, -100, -5.22995268,
    -0.10316299, -100, -7.09262244, -4.62050801, -0.31441241, -0.90634609,
    -100, -100, -100, -100, -0.77518533, -100, -100, -100, -100, 0, 0, 0, 0
])
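# Illustrative comparison, not part of the original snippet: print the four
# candidate parameter vectors side by side (all are on the linear scale and,
# in this example, have the same length).
for name, theta in [('paper', best_par_paper), ('log10', best_par_log),
                    ('logicle', best_par_logicle), ('red', best_par_red)]:
    print(name, np.round(np.asarray(theta), 8))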
Example #3
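# Same assumed context as Example #1 (numpy, pypesto, LogicleScale and the
# AMICI/PEtab objects `importer`, `solver`, `petab_problem`, `model`).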
model.requireSensitivitiesForAllParameters()                   # ... w.r.t. all parameters

# play with FSA tolerances
solver.setRelativeToleranceFSA(rtol=1e-10)
solver.setAbsoluteToleranceFSA(atol=1e-10)

# CREATE LOGICLE OBJECTIVE FUNCTION __________________________________________________________________________________

obj_lin = importer.create_objective(solver=solver)

# logicle parameter
T = 10
end_lin = 1e-5
logicle_obj = LogicleScale.LogicleObject(T=T, end_lin=end_lin)

f = lambda x: obj_lin.get_fval(logicleInverseTransform(par=x, logicle_object=logicle_obj))
g = lambda x: obj_lin.get_grad(logicleInverseTransform(x, logicle_obj)) * logicleInverseGradient(x, logicle_obj)

obj = pypesto.Objective(fun=f, grad=g) #, hess=h)

print('optimal x = ', petab_problem.x_nominal)
print('optimal lh value', obj(petab_problem.x_nominal))


# check gradient at optimum and at random point
check_grad_1 = obj.check_grad(petab_problem.x_nominal)
print(check_grad_1[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])

x_random = np.random.normal(0.5, 0.005, 12)
check_grad_2 = obj.check_grad(x_random)
print(check_grad_2[np.array(['grad', 'fd_c', 'abs_err', 'rel_err'])])
best_par_paper = [
    0.59, 0.025, 0.009, 21.5e-6, 3.6e-8, 7.5e-6, 0.75, 5.5e-2, 1.8e-7, 1.8e-5,
    0.64, 0.15, 1, 1, 1, 1
]

# optimal log10 parameter
best_par_log = 10**np.array([
    -0.2390705, -1.59656206, -1.92944291, -4.61622465, -9.99597055,
    -5.12195751, -0.1410224, -4.80272898, -10., -4.70446442, -0.26370535,
    -0.91631592, 0, 0, 0, 0
])

# optimal logicle parameter
logicle_obj = LogicleObject(T=100, end_lin=1e-6)
best_par_logicle = logicleInverseTransform([
    0.74506269, 0.58513538, 0.53479985, 0.17364882, 0., 0.16668271, 0.75314595,
    0.49857313, 0., 0.21001242, 0.74363556, 0.66104223, 1, 1, 1, 1
], logicle_obj)

# optimal log10 + 1e-5 parameter
best_par_logE_E = 10**np.array([
    -0.2212495, -1.55911411, -1.99446709, -4.66316381, -5., -4.76937694,
    -0.12450302, -4.99997509, -4.97341095, -4.60686091, -0.22922818,
    -0.92992495, 0, 0, 0, 0
]) - 1e-5
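# Back-transform for the log10(theta + 1e-5) parameterisation: the optimizer
# estimated eta = log10(theta + 1e-5), so theta = 10**eta - 1e-5 recovers the
# linear-scale values collected below.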

par_dict = {
    'nominal': best_par_paper,
    r'$\log_{10}$': best_par_log,
    'logicle': best_par_logicle,
    r'$\log_{10}(\theta+10^{-5})$': best_par_logE_E
}
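# Illustrative use of par_dict, not part of the original snippet: iterate over
# the collected parameterisations, e.g. to tabulate or plot them under the
# labels defined above.
for label, theta in par_dict.items():
    print(label, np.round(np.asarray(theta), 8))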
Example #5
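# Same assumed context as above; in addition, the list `par`, the values `p2`,
# the matplotlib axis `ax1` and `fontsize` come from earlier parts of the
# original script that are not shown here.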
p3 = [1000.0]

for i in range(0, len(p2)):
    par.append(p2[i])
par.append(p3[0])

T = 1000
end_lin = 1e-5

# transform your parameter
par_logicle = logicleTransform(par=par, T=T, end_lin=end_lin)

# calculate transition
W = par_logicle[1].W
M = par_logicle[1].M
transition = logicleInverseTransform(np.array([W / (M - W)]), par_logicle[1])
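# par_logicle[1] is the LogicleObject returned alongside the transformed
# values; in the usual logicle parameterisation, W is the linearisation width
# and M the number of decades. Inverse-transforming the scaled coordinate
# W / (M - W) presumably yields the linear parameter value where the scale
# changes from quasi-linear to logarithmic behaviour (interpretation based on
# the variable name, not stated in the original snippet).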

ax1.plot(par, par_logicle[0], color='black', label='logicle')
ax1.set_xlabel("linear parameter value ($\\theta$)", fontsize=fontsize)
ax1.set_ylabel("transformed parameter value ($\\xi$)", fontsize=fontsize)
ax1.plot(par, 5580 * np.array(par), '--', color='green', label='linear')
ax1.plot(par[1:],
         0.115 * np.log10(par[1:]) + 0.65,
         '--',
         color='red',
         label='logarithmic')
ax1.set_xscale('log')
ax1.set_xlim(1e-9, 1e3)
ax1.set_ylim(0, 1)
ax1.legend(loc='lower right')
ax1.text(-0.1, 1.05, 'A', transform=ax1.transAxes, size=20, weight='bold')
best_par_paper = [
    0.59, 0.025, 0.009, 21.5e-6, 3.6e-8, 7.5e-6, 0.75, 5.5e-2, 1.8e-7, 1.8e-5,
    0.64, 0.15, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1, 1, 1, 1
]

best_par_log = 10**np.array([
    -0.07426949, -1.5424396, -1.71845154, -4.79418012, -10., -5.23332452,
    -0.12601026, -4.99988123, -9.99999998, -4.55638098, -0.30898867,
    -0.90503462, -10., -6.96050283, -8.54550995, -10., -0.80136887, -10.,
    -8.31990936, -4.27926881, -4.63058586, 0, 0, 0, 0
])

logicle_obj = LogicleObject(T=100, end_lin=1e-5)
best_par_logicle = logicleInverseTransform([
    0.73294927, 0.54583766, 0.52801805, 0.13235649, 0., 0.03449152, 0.71937865,
    0.00973187, 0.09015665, 0.05718341, 0.68776811, 0.62018734, 0.27800834,
    0.08857416, 0.27061034, 0.24731772, 0.63725277, 0., 0.0983246, 0.00224438,
    0.054455, 1, 1, 1, 1
], logicle_obj)

# optimal log10(theta + 1e-5) parameter, back-transformed to the linear scale
best_par_logE = 10**np.array([
    -0.05851094, -4.98666302, -1.58520019, -4.99637148, -4.67216309, -5.,
    -0.14024413, -4.99803219, -4.9649339, -5., 0.30152854, -1.04902968, -5.,
    -4.94896808, -2.27177367, -5., -0.55644988, -5., 0.33648427, -5.,
    -4.99647165, 0, 0, 0, 0
]) - 1e-5

# best_par_logE = 10**np.array([-0.20575338, -1.60353286, -1.26472785, -4.90051486, -5., -4.88502002, -0.16959158,
#                               -4.99985951, -4.81310713, -5., 0.09150793, -0.97268348, -5., -4.81799054, -5., -5.,
#                               -0.73464377, -4.7346505, 0.1970503,  -5.,-4.74533977,0,0,0,0]) -1e-5
best_par_logE_aic = 10**np.array([
    -0.09637444, -1.37579749, -1.51133641, -3.04276312, -5., -5, -0.14580072,