def test_calcul_direct(u, f, a, t_f, dt, h_max, discretisation_h=None):
    """
    u : exact solution u(z, t)
    f : right-hand side
    a : Neumann data, t -> (du/dz)(0, t)
    """
    borne_z = 1
    if discretisation_h is None:
        # uniform grid on [0, borne_z]; np.linspace needs an integer count
        discretisation_h = np.linspace(0, borne_z, int(round(1 + borne_z / h_max)))
        # discretisation_h = discretisation(h_max)  # non-uniform grid

    def b(t):
        return u(borne_z, t)

    nu_1_2 = give_nu_plus_un_demi(discretisation_h, nu)
    u0 = u(discretisation_h, 0)
    K = calculer_K(discretisation_h, nu_1_2)
    M = calculer_M(K.shape[0] - 1)
    dis = discretisation_h[1:-1]
    all_f = []
    for t in np.linspace(dt, t_f, int(round(t_f / dt))):
        all_f.append(np.concatenate(([a(t)], f(dis, t), [b(t)])))
    hat_u = res_direct_tridiagonal(K, M, u0, all_f, dt)[-1]
    # alternative direct solver:
    # hat_u = res_direct(u0, f, discretisation_h, dt, t_f, nu_1_2, a, b)

    # maximum error between the exact and the computed solution at t_f
    uf = u(discretisation_h, t_f)
    return max(abs(uf - hat_u))
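
# Illustrative sketch, not called anywhere: it only shows the layout used for
# each time-step vector in all_f above -- the Neumann value a(t) first, the
# forcing f at the interior nodes, the Dirichlet value b(t) last -- and the
# integer sample count that np.linspace requires.  The lambdas are toy
# placeholders, not the module's actual test problem.
def _exemple_assemblage_second_membre():
    a = lambda t: np.exp(t)                # toy (du/dz)(0, t)
    b = lambda t: np.exp(1 + t)            # toy u(borne_z, t) with borne_z = 1
    f = lambda z, t: np.zeros_like(z)      # toy right-hand side
    z = np.linspace(0, 1, 11)              # uniform grid, h_max = 0.1
    dis = z[1:-1]
    dt, t_f = 0.1, 1.0
    all_f = [np.concatenate(([a(t)], f(dis, t), [b(t)]))
             for t in np.linspace(dt, t_f, int(round(t_f / dt)))]
    return all_f                           # 10 vectors, each of length len(z)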
def phi_test(Phi, discretisation_h, dt):
    """ tests the computation of the alpha coefficients obtained with Phi """
    u0 = u3(discretisation_h, 0)
    h_first = discretisation_h[1] - discretisation_h[0]
    last_z = discretisation_h[-1]
    t_f = 1
    nu_1_2 = give_nu_plus_un_demi(discretisation_h, nu)
    K = calculer_K(discretisation_h, nu_1_2)
    M = calculer_M(len(discretisation_h) - 1)
    second_membre = []
    for t in np.linspace(dt, t_f, int(round(t_f / dt))):
        # same layout as in test_calcul_direct: Neumann value, interior
        # forcing, Dirichlet value
        second_membre.append(
            np.concatenate(([np.exp(3 + t) * (1 + h_first / 2)],
                            f3(discretisation_h[1:-1], t),
                            [u3(last_z, t)])))
    alpha = calcul_alpha(Phi, M, K, u0, second_membre, dt)
    u_direct = res_direct_tridiagonal(K, M, u0, second_membre, dt)
    # error between the reconstructed solution Phi @ alpha_n and the direct
    # solution u_n at every time step
    diff = [max(abs(Phi @ alphan - un))
            for alphan, un in zip(alpha, u_direct)]
    u_real = u3(discretisation_h, t_f)  # exact solution at t_f (unused here)
    return max(diff)
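
# Illustrative sketch, not called anywhere: phi_test above compares the
# reconstructed solution Phi @ alpha_n with the direct solution in the max
# norm.  This toy version performs the same comparison on made-up data (a
# random basis, random coefficients and slightly perturbed snapshots); none
# of it comes from the module's actual problem.
def _exemple_erreur_reconstruction(m=51, k=4, n_steps=10, seed=0):
    rng = np.random.default_rng(seed)
    Phi_toy = rng.standard_normal((m, k))          # basis: one column per mode
    alpha_toy = rng.standard_normal((n_steps, k))  # coefficients per time step
    snapshots = [Phi_toy @ a + 1e-8 * rng.standard_normal(m) for a in alpha_toy]
    diff = [max(abs(Phi_toy @ a - u)) for a, u in zip(alpha_toy, snapshots)]
    return max(diff)                               # ~1e-8 by construction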
    except:
        raise
"""

# set up the problem:
m = 51
dt = 0.1
z_max = 6
discretisation_h = np.linspace(0, z_max, m)
Phi_arbitraire = np.reshape(np.exp(discretisation_h), (m, 1))
nu_1_2 = give_nu_plus_un_demi(discretisation_h, nu)
u0 = u(discretisation_h, 0)
dis = discretisation_h[1:-1]
# all_f = [np.concatenate(([np.exp(i*dt)], f(dis, i*dt), [u(z_max, i*dt)]))
all_f = [np.concatenate(([0], f(dis, i*dt), [u(z_max, i*dt)]))
         for i in range(1, 10)]
K = calculer_K(discretisation_h, nu_1_2)
ensemble_apprentissage = [(u0, K, all_f, dt)]

Phi = None
try:
    for _ in range(4):
        # grow the basis by one (arbitrary) column at each pass
        if Phi is not None:
            Phi = np.hstack((Phi, Phi_arbitraire))
        else:
            Phi = np.copy(Phi_arbitraire)
        # the optimiser works on a flat vector, hence the ravel
        Phi = np.ravel(Phi)
        res_opti = opti.minimize(calcul_lagrangien, Phi,
                                 jac=calcul_gradient,
                                 args=(m, ensemble_apprentissage),
                                 method='BFGS')
        print(res_opti.message)
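        # scipy.optimize.minimize with method='BFGS' calls
        # calcul_lagrangien(Phi, m, ensemble_apprentissage) for the scalar
        # objective and calcul_gradient with the same arguments for its
        # gradient (an array the size of Phi); the optimised flat vector is
        # returned in res_opti.x.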