def test_p2d_add_ghost_nodes(self):
    """Check that FiniteVolume.add_ghost_nodes correctly appends Dirichlet ghost
    cells on a pseudo-2D (particle-in-electrode) mesh.

    The interior values must be unchanged, and the average of each ghost cell
    with its neighbour must equal the imposed boundary value (0 on the left,
    3 on the right), since the boundary lies halfway between the two cells.
    """
    # create discretisation
    mesh = get_p2d_mesh_for_testing()
    spatial_methods = {
        "macroscale": pybamm.FiniteVolume(),
        "negative particle": pybamm.FiniteVolume(),
        "positive particle": pybamm.FiniteVolume(),
    }
    disc = pybamm.Discretisation(mesh, spatial_methods)

    # add ghost nodes
    c_s_n = pybamm.Variable("c_s_n", domain=["negative particle"])
    c_s_p = pybamm.Variable("c_s_p", domain=["positive particle"])
    # slices are set separately per variable so each StateVector starts at 0
    disc.set_variable_slices([c_s_n])
    disc_c_s_n = pybamm.StateVector(*disc.y_slices[c_s_n.id])
    disc.set_variable_slices([c_s_p])
    disc_c_s_p = pybamm.StateVector(*disc.y_slices[c_s_p.id])
    bcs = {
        "left": (pybamm.Scalar(0), "Dirichlet"),
        "right": (pybamm.Scalar(3), "Dirichlet"),
    }
    sp_meth = pybamm.FiniteVolume()
    sp_meth.build(mesh)
    c_s_n_plus_ghost, _ = sp_meth.add_ghost_nodes(c_s_n, disc_c_s_n, bcs)
    c_s_p_plus_ghost, _ = sp_meth.add_ghost_nodes(c_s_p, disc_c_s_p, bcs)

    mesh_s_n = mesh["negative particle"]
    mesh_s_p = mesh["positive particle"]
    # primary points: radial mesh points in one particle;
    # secondary points: number of particles (one per x-node)
    n_prim_pts = mesh_s_n[0].npts
    n_sec_pts = len(mesh_s_n)
    p_prim_pts = mesh_s_p[0].npts
    p_sec_pts = len(mesh_s_p)

    # state vector of all ones, replicated over the secondary dimension
    y_s_n_test = np.kron(np.ones(n_sec_pts), np.ones(n_prim_pts))
    y_s_p_test = np.kron(np.ones(p_sec_pts), np.ones(p_prim_pts))

    # evaluate with and without ghost points
    c_s_n_eval = disc_c_s_n.evaluate(None, y_s_n_test)
    c_s_n_ghost_eval = c_s_n_plus_ghost.evaluate(None, y_s_n_test)
    c_s_p_eval = disc_c_s_p.evaluate(None, y_s_p_test)
    c_s_p_ghost_eval = c_s_p_plus_ghost.evaluate(None, y_s_p_test)

    # reshape to (secondary, primary) to make comparisons easy;
    # ghosted arrays have two extra primary points (one per boundary)
    c_s_n_eval = np.reshape(c_s_n_eval, [n_sec_pts, n_prim_pts])
    c_s_n_ghost_eval = np.reshape(c_s_n_ghost_eval, [n_sec_pts, n_prim_pts + 2])
    c_s_p_eval = np.reshape(c_s_p_eval, [p_sec_pts, p_prim_pts])
    c_s_p_ghost_eval = np.reshape(c_s_p_ghost_eval, [p_sec_pts, p_prim_pts + 2])

    # interior values are unchanged by adding ghost nodes
    np.testing.assert_array_equal(c_s_n_ghost_eval[:, 1:-1], c_s_n_eval)
    np.testing.assert_array_equal(c_s_p_ghost_eval[:, 1:-1], c_s_p_eval)
    # midpoint of (ghost, first) cells equals the left Dirichlet value (0)
    np.testing.assert_array_equal(
        (c_s_n_ghost_eval[:, 0] + c_s_n_ghost_eval[:, 1]) / 2, 0
    )
    np.testing.assert_array_equal(
        (c_s_p_ghost_eval[:, 0] + c_s_p_ghost_eval[:, 1]) / 2, 0
    )
    # midpoint of (last, ghost) cells equals the right Dirichlet value (3)
    np.testing.assert_array_equal(
        (c_s_n_ghost_eval[:, -2] + c_s_n_ghost_eval[:, -1]) / 2, 3
    )
    np.testing.assert_array_equal(
        (c_s_p_ghost_eval[:, -2] + c_s_p_ghost_eval[:, -1]) / 2, 3
    )
def test_model_solver_dae_multiple_nonsmooth_python(self):
    """Solve a DAE whose rhs is periodic-nonsmooth (t mod a) and check that the
    solver inserts time points an epsilon either side of each discontinuity,
    whether or not the discontinuities appear in the requested t_eval.
    """
    model = pybamm.BaseModel()
    model.convert_to_format = "python"
    whole_cell = ["negative electrode", "separator", "positive electrode"]
    var1 = pybamm.Variable("var1", domain=whole_cell)
    var2 = pybamm.Variable("var2", domain=whole_cell)
    a = 0.6
    # var1' = t mod a is nonsmooth at t = a, 2a, 3a
    discontinuities = (np.arange(3) + 1) * a
    model.rhs = {var1: pybamm.Modulo(pybamm.t, a)}
    model.algebraic = {var2: 2 * var1 - var2}
    model.initial_conditions = {var1: 0, var2: 0}
    model.events = [
        pybamm.Event("var1 = 0.55", pybamm.min(var1 - 0.55)),
        pybamm.Event("var2 = 1.2", pybamm.min(var2 - 1.2)),
    ]
    # register each discontinuity time with the solver via a "nonsmooth rate" event
    for discontinuity in discontinuities:
        model.events.append(
            pybamm.Event("nonsmooth rate", pybamm.Scalar(discontinuity)))
    disc = get_discretisation_for_testing()
    disc.process_model(model)

    # Solve
    solver = pybamm.ScikitsDaeSolver(rtol=1e-8, atol=1e-8, root_method="lm")

    # create two time series, one without a time point on the discontinuity,
    # and one with
    t_eval1 = np.linspace(0, 2, 10)
    t_eval2 = np.insert(t_eval1, np.searchsorted(t_eval1, discontinuities),
                        discontinuities)
    solution1 = solver.solve(model, t_eval1)
    solution2 = solver.solve(model, t_eval2)

    # check time vectors
    for solution in [solution1, solution2]:
        # time vectors are ordered
        self.assertTrue(np.all(solution.t[:-1] <= solution.t[1:]))

        # time value before and after discontinuity is an epsilon away
        for discontinuity in discontinuities:
            dindex = np.searchsorted(solution.t, discontinuity)
            value_before = solution.t[dindex - 1]
            value_after = solution.t[dindex]
            self.assertEqual(value_before + sys.float_info.epsilon,
                             discontinuity)
            self.assertEqual(value_after - sys.float_info.epsilon,
                             discontinuity)

    # both solution time vectors should have same number of points
    self.assertEqual(len(solution1.t), len(solution2.t))

    # check solution
    for solution in [solution1, solution2]:
        # events cap var1 below 0.55 and var2 below 1.2 (last point may trigger)
        np.testing.assert_array_less(solution.y[0, :-1], 0.55)
        np.testing.assert_array_less(solution.y[-1, :-1], 1.2)
        # analytic solution of var1' = t mod a with var1(0) = 0
        var1_soln = (solution.t % a)**2 / 2 + a**2 / 2 * (solution.t // a)
        var2_soln = 2 * var1_soln
        np.testing.assert_allclose(solution.y[0], var1_soln, rtol=1e-06)
        np.testing.assert_allclose(solution.y[-1], var2_soln, rtol=1e-06)
# 3. State governing equations --------------------------------------------------------- R = k * pybamm.BoundaryValue(c, "left") # SEI reaction flux N = -(1 / L) * D(c) * pybamm.grad(c) # solvent flux dcdt = (V_hat * R) * pybamm.inner(x / L, pybamm.grad(c)) - ( 1 / L) * pybamm.div(N) # solvent concentration governing equation dLdt = V_hat * R # SEI thickness governing equation model.rhs = {c: dcdt, L: dLdt} # add to model # 4. State boundary conditions --------------------------------------------------------- D_left = pybamm.BoundaryValue( D(c), "left") # pybamm requires BoundaryValue(D(c)) and not D(BoundaryValue(c)) grad_c_left = L * R / D_left # left bc c_right = pybamm.Scalar(1) # right bc # add to model model.boundary_conditions = { c: { "left": (grad_c_left, "Neumann"), "right": (c_right, "Dirichlet") } } # 5. State initial conditions ---------------------------------------------------------- model.initial_conditions = {c: pybamm.Scalar(1), L: pybamm.Scalar(1)} # 6. State output variables ------------------------------------------------------------ model.variables = { "SEI thickness": L,
def test_process_symbol(self):
    """Check ParameterValues.process_symbol on every symbol kind: parameters are
    replaced by Scalars, operators/broadcasts/concatenations are rebuilt with
    processed children, non-parameter leaves pass through unchanged, and unknown
    symbols / missing parameters raise.
    """
    parameter_values = pybamm.ParameterValues({"a": 1, "b": 2, "c": 3})
    # process parameter
    a = pybamm.Parameter("a")
    processed_a = parameter_values.process_symbol(a)
    self.assertIsInstance(processed_a, pybamm.Scalar)
    self.assertEqual(processed_a.value, 1)

    # process binary operation
    b = pybamm.Parameter("b")
    add = a + b
    processed_add = parameter_values.process_symbol(add)
    self.assertIsInstance(processed_add, pybamm.Addition)
    self.assertIsInstance(processed_add.children[0], pybamm.Scalar)
    self.assertIsInstance(processed_add.children[1], pybamm.Scalar)
    self.assertEqual(processed_add.children[0].value, 1)
    self.assertEqual(processed_add.children[1].value, 2)

    # binary operation with a non-parameter (Scalar) child
    scal = pybamm.Scalar(34)
    mul = a * scal
    processed_mul = parameter_values.process_symbol(mul)
    self.assertIsInstance(processed_mul, pybamm.Multiplication)
    self.assertIsInstance(processed_mul.children[0], pybamm.Scalar)
    self.assertIsInstance(processed_mul.children[1], pybamm.Scalar)
    self.assertEqual(processed_mul.children[0].value, 1)
    self.assertEqual(processed_mul.children[1].value, 34)

    # process integral
    aa = pybamm.Parameter("a", domain=["negative electrode"])
    x = pybamm.SpatialVariable("x", domain=["negative electrode"])
    integ = pybamm.Integral(aa, x)
    processed_integ = parameter_values.process_symbol(integ)
    self.assertIsInstance(processed_integ, pybamm.Integral)
    self.assertIsInstance(processed_integ.children[0], pybamm.Scalar)
    self.assertEqual(processed_integ.children[0].value, 1)
    # the integration variable itself must be kept, not processed
    self.assertEqual(processed_integ.integration_variable[0].id, x.id)

    # process unary operation
    grad = pybamm.Gradient(a)
    processed_grad = parameter_values.process_symbol(grad)
    self.assertIsInstance(processed_grad, pybamm.Gradient)
    self.assertIsInstance(processed_grad.children[0], pybamm.Scalar)
    self.assertEqual(processed_grad.children[0].value, 1)

    # process delta function
    aa = pybamm.Parameter("a")
    delta_aa = pybamm.DeltaFunction(aa, "left", "some domain")
    processed_delta_aa = parameter_values.process_symbol(delta_aa)
    self.assertIsInstance(processed_delta_aa, pybamm.DeltaFunction)
    # side attribute must be preserved through processing
    self.assertEqual(processed_delta_aa.side, "left")
    processed_a = processed_delta_aa.children[0]
    self.assertIsInstance(processed_a, pybamm.Scalar)
    self.assertEqual(processed_a.value, 1)

    # process boundary operator (test for BoundaryValue)
    aa = pybamm.Parameter("a", domain=["negative electrode"])
    x = pybamm.SpatialVariable("x", domain=["negative electrode"])
    boundary_op = pybamm.BoundaryValue(aa * x, "left")
    processed_boundary_op = parameter_values.process_symbol(boundary_op)
    self.assertIsInstance(processed_boundary_op, pybamm.BoundaryOperator)
    processed_a = processed_boundary_op.children[0].children[0]
    processed_x = processed_boundary_op.children[0].children[1]
    self.assertIsInstance(processed_a, pybamm.Scalar)
    self.assertEqual(processed_a.value, 1)
    self.assertEqual(processed_x.id, x.id)

    # process broadcast
    whole_cell = ["negative electrode", "separator", "positive electrode"]
    broad = pybamm.PrimaryBroadcast(a, whole_cell)
    processed_broad = parameter_values.process_symbol(broad)
    self.assertIsInstance(processed_broad, pybamm.Broadcast)
    self.assertEqual(processed_broad.domain, whole_cell)
    self.assertIsInstance(processed_broad.children[0], pybamm.Scalar)
    self.assertEqual(processed_broad.children[0].evaluate(), np.array([1]))

    # process concatenation
    conc = pybamm.Concatenation(pybamm.Vector(np.ones(10)),
                                pybamm.Vector(2 * np.ones(15)))
    processed_conc = parameter_values.process_symbol(conc)
    self.assertIsInstance(processed_conc.children[0], pybamm.Vector)
    self.assertIsInstance(processed_conc.children[1], pybamm.Vector)
    np.testing.assert_array_equal(processed_conc.children[0].entries, 1)
    np.testing.assert_array_equal(processed_conc.children[1].entries, 2)

    # process domain concatenation
    c_e_n = pybamm.Variable("c_e_n", ["negative electrode"])
    c_e_s = pybamm.Variable("c_e_p", ["separator"])
    test_mesh = shared.get_mesh_for_testing()
    dom_con = pybamm.DomainConcatenation([a * c_e_n, b * c_e_s], test_mesh)
    processed_dom_con = parameter_values.process_symbol(dom_con)
    a_proc = processed_dom_con.children[0].children[0]
    b_proc = processed_dom_con.children[1].children[0]
    self.assertIsInstance(a_proc, pybamm.Scalar)
    self.assertIsInstance(b_proc, pybamm.Scalar)
    self.assertEqual(a_proc.value, 1)
    self.assertEqual(b_proc.value, 2)

    # process variable (passes through unchanged)
    c = pybamm.Variable("c")
    processed_c = parameter_values.process_symbol(c)
    self.assertIsInstance(processed_c, pybamm.Variable)
    self.assertEqual(processed_c.name, "c")

    # process scalar (passes through unchanged)
    d = pybamm.Scalar(14)
    processed_d = parameter_values.process_symbol(d)
    self.assertIsInstance(processed_d, pybamm.Scalar)
    self.assertEqual(processed_d.value, 14)

    # process array types (pass through unchanged)
    e = pybamm.Vector(np.ones(4))
    processed_e = parameter_values.process_symbol(e)
    self.assertIsInstance(processed_e, pybamm.Vector)
    np.testing.assert_array_equal(processed_e.evaluate(), np.ones((4, 1)))

    f = pybamm.Matrix(np.ones((5, 6)))
    processed_f = parameter_values.process_symbol(f)
    self.assertIsInstance(processed_f, pybamm.Matrix)
    np.testing.assert_array_equal(processed_f.evaluate(), np.ones((5, 6)))

    # process statevector (passes through unchanged)
    g = pybamm.StateVector(slice(0, 10))
    processed_g = parameter_values.process_symbol(g)
    self.assertIsInstance(processed_g, pybamm.StateVector)
    np.testing.assert_array_equal(processed_g.evaluate(y=np.ones(10)),
                                  np.ones((10, 1)))

    # not implemented: a bare Symbol cannot be processed
    sym = pybamm.Symbol("sym")
    with self.assertRaises(NotImplementedError):
        parameter_values.process_symbol(sym)

    # not found: parameter "x" is not in the parameter values
    with self.assertRaises(KeyError):
        x = pybamm.Parameter("x")
        parameter_values.process_symbol(x)
def get_coupled_variables(self, variables):
    """Compute the interfacial (Butler-Volmer-type) kinetics variables for this
    electrode and merge them into `variables`.

    Builds the reaction overpotential eta_r = delta_phi - OCP (plus any SEI
    film overpotential), evaluates the kinetics to get the interfacial current
    density j, and updates `variables` with the standard current / exchange
    current / overpotential / OCP variable dictionaries.
    """
    # Calculate delta_phi from phi_s and phi_e if it isn't already known
    if self.domain + " electrode surface potential difference" not in variables:
        variables = self._get_delta_phi(variables)
    delta_phi = variables[self.domain +
                          " electrode surface potential difference"]
    # If delta_phi was broadcast, take only the orphan
    if isinstance(delta_phi, pybamm.Broadcast):
        delta_phi = delta_phi.orphans[0]

    # Get exchange-current density
    j0 = self._get_exchange_current_density(variables)
    # Get open-circuit potential variables and reaction overpotential
    ocp, dUdT = self._get_open_circuit_potential(variables)
    eta_r = delta_phi - ocp
    # Get average interfacial current density
    j_tot_av = self._get_average_total_interfacial_current_density(variables)
    # j = j_tot_av + (j - pybamm.x_average(j))  # enforce true average

    # Add SEI resistance
    if self.options["SEI film resistance"] == "distributed":
        # distributed: use the local total current density variable
        if self.domain == "Negative":
            R_sei = self.param.R_sei_n
        elif self.domain == "Positive":
            R_sei = self.param.R_sei_p
        L_sei = variables["Total " + self.domain.lower() +
                          " electrode SEI thickness"]
        j_tot = variables["Total " + self.domain.lower() +
                          " electrode interfacial current density variable"]
        eta_sei = -j_tot * L_sei * R_sei
    elif self.options["SEI film resistance"] == "average":
        # average: use the x-averaged total current density instead
        if self.domain == "Negative":
            R_sei = self.param.R_sei_n
        elif self.domain == "Positive":
            R_sei = self.param.R_sei_p
        L_sei = variables["Total " + self.domain.lower() +
                          " electrode SEI thickness"]
        eta_sei = -j_tot_av * L_sei * R_sei
    else:
        # no SEI film resistance
        eta_sei = pybamm.Scalar(0)
    eta_r += eta_sei

    # Get number of electrons in reaction
    ne = self._get_number_of_electrons_in_reaction()
    # Get kinetics. Note: T must have the same domain as j0 and eta_r
    if j0.domain in ["current collector", ["current collector"]]:
        T = variables["X-averaged cell temperature"]
    else:
        T = variables[self.domain + " electrode temperature"]

    # Update j, except in the "distributed SEI resistance" model, where j will
    # be found by solving an algebraic equation
    # (In the "distributed SEI resistance" model, we have already defined j)
    j = self._get_kinetics(j0, ne, eta_r, T)

    variables.update(self._get_standard_interfacial_current_variables(j))
    variables.update(
        self._get_standard_total_interfacial_current_variables(j_tot_av))
    variables.update(self._get_standard_exchange_current_variables(j0))
    variables.update(self._get_standard_overpotential_variables(eta_r))
    variables.update(self._get_standard_ocp_variables(ocp, dUdT))

    if "main" in self.reaction:
        variables.update(
            self._get_standard_sei_film_overpotential_variables(eta_sei))

    # Once both electrodes' current densities are known, and the whole-cell
    # variable has not already been added, add the whole-cell variables
    if ("Negative electrode" + self.reaction_name +
            " interfacial current density" in variables
            and "Positive electrode" + self.reaction_name +
            " interfacial current density" in variables
            and self.Reaction_icd not in variables):
        variables.update(
            self._get_standard_whole_cell_interfacial_current_variables(
                variables))
        variables.update(
            self._get_standard_whole_cell_exchange_current_variables(
                variables))

    return variables
def _set_scales(self): "Define the scales used in the non-dimensionalisation scheme" # Concentration self.electrolyte_concentration_scale = self.c_e_typ self.negative_particle_concentration_scale = self.c_n_max self.positive_particle_concentration_scale = self.c_n_max # Electrical self.potential_scale = self.R * self.T_ref / self.F self.current_scale = self.i_typ self.j_scale_n = self.i_typ / (self.a_n_dim * self.L_x) self.j_scale_p = self.i_typ / (self.a_p_dim * self.L_x) # Reference OCP based on initial concentration at # current collector/electrode interface sto_n_init = self.c_n_init_dimensional(0) / self.c_n_max self.U_n_ref = self.U_n_dimensional(sto_n_init, self.T_ref) # Reference OCP based on initial concentration at # current collector/electrode interface sto_p_init = self.c_p_init_dimensional(1) / self.c_p_max self.U_p_ref = self.U_p_dimensional(sto_p_init, self.T_ref) # Reference exchange-current density self.j0_n_ref_dimensional = ( self.j0_n_dimensional(self.c_e_typ, self.c_n_max / 2, self.T_ref) * 2) self.j0_p_ref_dimensional = ( self.j0_p_dimensional(self.c_e_typ, self.c_p_max / 2, self.T_ref) * 2) # Thermal self.Delta_T = self.therm.Delta_T # Velocity scale self.velocity_scale = pybamm.Scalar(1) # Discharge timescale self.tau_discharge = self.F * self.c_n_max * self.L_x / self.i_typ # Reaction timescales self.tau_r_n = (self.F * self.c_n_max / (self.j0_n_ref_dimensional * self.a_n_dim)) self.tau_r_p = (self.F * self.c_p_max / (self.j0_p_ref_dimensional * self.a_p_dim)) # Electrolyte diffusion timescale self.D_e_typ = self.D_e_dimensional(self.c_e_typ, self.T_ref) self.tau_diffusion_e = self.L_x**2 / self.D_e_typ # Particle diffusion timescales self.tau_diffusion_n = self.R_n**2 / self.D_n_dimensional( pybamm.Scalar(1), self.T_ref) self.tau_diffusion_p = self.R_p**2 / self.D_p_dimensional( pybamm.Scalar(1), self.T_ref) # Thermal diffusion timescale self.tau_th_yz = self.therm.tau_th_yz # Choose discharge timescale self.timescale = self.tau_discharge
def D_n(self, c_s_n, T):
    """Dimensionless negative particle diffusivity.

    The dimensional diffusivity at stoichiometry ``c_s_n`` and dimensionless
    temperature ``T`` is normalised by its value at full stoichiometry and the
    reference temperature.
    """
    stoichiometry = c_s_n
    # re-dimensionalise the temperature before calling the dimensional function
    dimensional_temperature = self.Delta_T * T + self.T_ref
    reference_diffusivity = self.D_n_dimensional(pybamm.Scalar(1), self.T_ref)
    return (self.D_n_dimensional(stoichiometry, dimensional_temperature)
            / reference_diffusivity)
def test_diff_state_vector_dot(self):
    """Differentiating a StateVectorDot gives one w.r.t. itself, zero otherwise."""
    dot_vector = pybamm.StateVectorDot(slice(0, 1))
    plain_vector = pybamm.StateVector(slice(1, 2))
    # d(y')/d(y') == 1
    self.assertEqual(dot_vector.diff(dot_vector).id, pybamm.Scalar(1).id)
    # d(y')/d(other y) == 0
    self.assertEqual(dot_vector.diff(plain_vector).id, pybamm.Scalar(0).id)
def set_initial_conditions(self, variables):
    """Set the initial (dimensionless) crack length to one, broadcast over the
    electrode domain."""
    crack_length = variables[self.domain + " particle crack length"]
    electrode_domain = self.domain.lower() + " electrode"
    initial_length = pybamm.PrimaryBroadcast(pybamm.Scalar(1),
                                             [electrode_domain])
    self.initial_conditions = {crack_length: initial_length}
def x_average(symbol):
    """Convenience function for creating an average in the x-direction.

    Parameters
    ----------
    symbol : :class:`pybamm.Symbol`
        The function to be averaged

    Returns
    -------
    :class:`Symbol`
        the new averaged symbol

    Raises
    ------
    ValueError
        If `symbol` evaluates on edges (cannot be averaged).
    """
    # Can't take average if the symbol evaluates on edges
    if symbol.evaluates_on_edges():
        raise ValueError(
            "Can't take the x-average of a symbol that evaluates on edges")
    # If symbol doesn't have a domain, its average value is itself
    if symbol.domain in [[], ["current collector"]]:
        new_symbol = symbol.new_copy()
        new_symbol.parent = None
        return new_symbol
    # If symbol is a Broadcast, its average value is its child
    elif isinstance(symbol, pybamm.Broadcast):
        return symbol.orphans[0]
    # If symbol is a concatenation of Broadcasts, its average value is its child
    elif (isinstance(symbol, pybamm.Concatenation) and all(
            isinstance(child, pybamm.Broadcast) for child in symbol.children)
            and symbol.domain == [
                "negative electrode", "separator", "positive electrode"]):
        a, b, c = [orp.orphans[0] for orp in symbol.orphans]
        if a.id == b.id == c.id:
            # all three children broadcast the same symbol: average is that symbol
            return a
        else:
            # length-weighted average of the three electrode/separator values
            l_n = pybamm.geometric_parameters.l_n
            l_s = pybamm.geometric_parameters.l_s
            l_p = pybamm.geometric_parameters.l_p
            return (l_n * a + l_s * b + l_p * c) / (l_n + l_s + l_p)
    # Otherwise, use Integral to calculate average value
    else:
        if symbol.domain == ["negative electrode"]:
            x = pybamm.standard_spatial_vars.x_n
            l = pybamm.geometric_parameters.l_n
        elif symbol.domain == ["separator"]:
            x = pybamm.standard_spatial_vars.x_s
            l = pybamm.geometric_parameters.l_s
        elif symbol.domain == ["positive electrode"]:
            x = pybamm.standard_spatial_vars.x_p
            l = pybamm.geometric_parameters.l_p
        elif symbol.domain == [
                "negative electrode", "separator", "positive electrode"]:
            x = pybamm.standard_spatial_vars.x
            l = pybamm.Scalar(1)
        # particle domains use the corresponding electrode's x variable
        elif symbol.domain == ["negative particle"]:
            x = pybamm.standard_spatial_vars.x_n
            l = pybamm.geometric_parameters.l_n
        elif symbol.domain == ["positive particle"]:
            x = pybamm.standard_spatial_vars.x_p
            l = pybamm.geometric_parameters.l_p
        else:
            # unknown domain: integrate a field of ones to get the domain length
            x = pybamm.SpatialVariable("x", domain=symbol.domain)
            v = pybamm.ones_like(symbol)
            l = pybamm.Integral(v, x)
        # fix: qualify `Integral` with the `pybamm` namespace, consistent with
        # every other reference in this function (`pybamm.Integral(v, x)` above)
        return pybamm.Integral(symbol, x) / l
def test_diff_zero(self):
    """Differentiating w.r.t. an absent state vector yields the zero scalar."""
    present = pybamm.StateVector(slice(0, 1))
    absent = pybamm.StateVector(slice(1, 2))
    expression = (present * 2 + 5 * (-present)) / (present * present)
    # `absent` never appears in the expression, so the derivative is zero
    self.assertEqual(expression.diff(absent).id, pybamm.Scalar(0).id)
    # sanity check: the derivative w.r.t. `present` is NOT zero
    self.assertNotEqual(expression.diff(present).id, pybamm.Scalar(0).id)
def _unary_jac(self, child_jac):
    """See :meth:`pybamm.UnaryOperator._unary_jac()`."""
    # The jacobian of this operator is the zero scalar, independent of the
    # child's jacobian.
    zero_jacobian = pybamm.Scalar(0)
    return zero_jacobian
def diff(self, variable):
    """See :meth:`pybamm.Symbol.diff()`."""
    # The derivative with respect to any variable is the zero scalar.
    zero_derivative = pybamm.Scalar(0)
    return zero_derivative
def _jac(self, variable):
    """See :meth:`pybamm.Symbol._jac()`."""
    # The jacobian with respect to any variable is the zero scalar.
    zero_jacobian = pybamm.Scalar(0)
    return zero_jacobian
def __init__(self, name="Single Particle Model"):
    """Build the Single Particle Model: particle diffusion ODEs for each
    electrode, discharge-capacity tracking, and algebraic output variables
    (potentials, terminal voltage) computed from the surface concentrations.
    """
    super().__init__({}, name)
    pybamm.citations.register("Marquis2019")
    # `param` is a class containing all the relevant parameters and functions for
    # this model. These are purely symbolic at this stage, and will be set by the
    # `ParameterValues` class when the model is processed.
    param = self.param

    ######################
    # Variables
    ######################
    # Variables that depend on time only are created without a domain
    Q = pybamm.Variable("Discharge capacity [A.h]")
    # Variables that vary spatially are created with a domain
    c_s_n = pybamm.Variable(
        "X-averaged negative particle concentration",
        domain="negative particle"
    )
    c_s_p = pybamm.Variable(
        "X-averaged positive particle concentration",
        domain="positive particle"
    )
    # Constant temperature
    T = param.T_init

    ######################
    # Other set-up
    ######################
    # Current density
    i_cell = param.current_with_time
    j_n = i_cell / param.l_n
    j_p = -i_cell / param.l_p

    ######################
    # State of Charge
    ######################
    I = param.dimensional_current_with_time
    # The `rhs` dictionary contains differential equations, with the key being
    # the variable in the d/dt
    self.rhs[Q] = I * param.timescale / 3600
    # Initial conditions must be provided for the ODEs
    self.initial_conditions[Q] = pybamm.Scalar(0)

    ######################
    # Particles
    ######################
    # The div and grad operators will be converted to the appropriate matrix
    # multiplication at the discretisation stage
    N_s_n = -param.D_n(c_s_n, T) * pybamm.grad(c_s_n)
    N_s_p = -param.D_p(c_s_p, T) * pybamm.grad(c_s_p)
    self.rhs[c_s_n] = -(1 / param.C_n) * pybamm.div(N_s_n)
    self.rhs[c_s_p] = -(1 / param.C_p) * pybamm.div(N_s_p)
    # Surf takes the surface value of a variable, i.e. its boundary value on the
    # right side. This is also accessible via `boundary_value(x, "right")`, with
    # "left" providing the boundary value of the left side
    c_s_surf_n = pybamm.surf(c_s_n)
    c_s_surf_p = pybamm.surf(c_s_p)
    # Boundary conditions must be provided for equations with spatial derivatives
    self.boundary_conditions[c_s_n] = {
        "left": (pybamm.Scalar(0), "Neumann"),
        "right": (
            -param.C_n * j_n / param.a_R_n / param.D_n(c_s_surf_n, T),
            "Neumann",
        ),
    }
    self.boundary_conditions[c_s_p] = {
        "left": (pybamm.Scalar(0), "Neumann"),
        "right": (
            -param.C_p * j_p / param.a_R_p / param.gamma_p
            / param.D_p(c_s_surf_p, T),
            "Neumann",
        ),
    }
    # c_n_init and c_p_init are functions, but for the SPM we evaluate them at
    # x=0 and x=1 since there is no x-dependence in the particles
    self.initial_conditions[c_s_n] = param.c_n_init(0)
    self.initial_conditions[c_s_p] = param.c_p_init(1)
    # Events specify points at which a solution should terminate
    self.events += [
        pybamm.Event(
            "Minimum negative particle surface concentration",
            pybamm.min(c_s_surf_n) - 0.01,
        ),
        pybamm.Event(
            "Maximum negative particle surface concentration",
            (1 - 0.01) - pybamm.max(c_s_surf_n),
        ),
        pybamm.Event(
            "Minimum positive particle surface concentration",
            pybamm.min(c_s_surf_p) - 0.01,
        ),
        pybamm.Event(
            "Maximum positive particle surface concentration",
            (1 - 0.01) - pybamm.max(c_s_surf_p),
        ),
    ]
    # Note that the SPM does not have any algebraic equations, so the
    # `algebraic` dictionary remains empty

    ######################
    # (Some) variables
    ######################
    # Interfacial reactions
    j0_n = param.j0_n(1, c_s_surf_n, T) / param.C_r_n
    j0_p = param.gamma_p * param.j0_p(1, c_s_surf_p, T) / param.C_r_p
    # inverse Butler-Volmer kinetics for the reaction overpotentials
    eta_n = (2 / param.ne_n) * pybamm.arcsinh(j_n / (2 * j0_n))
    eta_p = (2 / param.ne_p) * pybamm.arcsinh(j_p / (2 * j0_p))
    # the negative electrode potential is taken as the reference (zero)
    phi_s_n = 0
    phi_e = -eta_n - param.U_n(c_s_surf_n, T)
    phi_s_p = eta_p + phi_e + param.U_p(c_s_surf_p, T)
    V = phi_s_p
    whole_cell = ["negative electrode", "separator", "positive electrode"]
    # The `variables` dictionary contains all variables that might be useful for
    # visualising the solution of the model
    # Primary broadcasts are used to broadcast scalar quantities across a domain
    # into a vector of the right shape, for multiplying with other vectors
    self.variables = {
        "Negative particle surface concentration": pybamm.PrimaryBroadcast(
            c_s_surf_n, "negative electrode"
        ),
        "Electrolyte concentration": pybamm.PrimaryBroadcast(1, whole_cell),
        "Positive particle surface concentration": pybamm.PrimaryBroadcast(
            c_s_surf_p, "positive electrode"
        ),
        "Current [A]": I,
        "Negative electrode potential": pybamm.PrimaryBroadcast(
            phi_s_n, "negative electrode"
        ),
        "Electrolyte potential": pybamm.PrimaryBroadcast(phi_e, whole_cell),
        "Positive electrode potential": pybamm.PrimaryBroadcast(
            phi_s_p, "positive electrode"
        ),
        "Terminal voltage": V,
    }
    # Stop the simulation when the voltage leaves the operating window
    self.events += [
        pybamm.Event("Minimum voltage", V - param.voltage_low_cut),
        pybamm.Event("Maximum voltage", V - param.voltage_high_cut),
    ]
def test_jac_of_unary_operator(self):
    """The abstract UnaryOperator base class has no jacobian implementation."""
    child = pybamm.Scalar(1)
    operator = pybamm.UnaryOperator("Operator", child)
    state = pybamm.StateVector(slice(0, 1))
    with self.assertRaises(NotImplementedError):
        operator.jac(state)
# Authenticate with the Twitter API (`auth`, `Keys` and `tweepy` are set up
# earlier in the script, outside this excerpt)
auth.set_access_token(Keys.ACCESS_KEY, Keys.ACCESS_SECRET)
api = tweepy.API(auth)
# mention = api.mentions_timeline()

# Setting up a PyBaMM example: a 2x2 linear ODE system
model = pybamm.BaseModel()
x = pybamm.Variable("x")
y = pybamm.Variable("y")
dxdt = 4 * x - 2 * y
dydt = 3 * x - y
model.rhs = {x: dxdt, y: dydt}
model.initial_conditions = {x: pybamm.Scalar(1), y: pybamm.Scalar(2)}
model.variables = {"x": x, "y": y, "z": x + 4 * y}

disc = pybamm.Discretisation()  # use the default discretisation
disc.process_model(model)

# Solve on t in [0, 1] with 20 output points
solver = pybamm.ScipySolver()
t = np.linspace(0, 1, 20)
solution = solver.solve(model, t)

t_sol, y_sol = solution.t, solution.y  # get solution times and states
x = solution["x"]  # extract and process x from the solution
y = solution["y"]  # extract and process y from the solution

# finer time grid, presumably for plotting the processed variables below
# (plotting code not shown in this excerpt)
t_fine = np.linspace(0, t[-1], 1000)
def _get_dj_dc(self, variables):
    """Return the derivative of the interfacial current density with respect
    to concentration — identically zero for this submodel."""
    zero = pybamm.Scalar(0)
    return zero
def _set_dimensionless_parameters(self):
    """Defines the dimensionless parameters.

    Derives all dimensionless groups (timescale ratios, geometry, electrode,
    electrolyte, thermal, SEI and initial-condition parameters) from the
    dimensional parameters and scales set elsewhere on this class.
    """
    # Timescale ratios (each process timescale over the discharge timescale)
    self.C_n = self.tau_diffusion_n / self.tau_discharge
    self.C_p = self.tau_diffusion_p / self.tau_discharge
    self.C_e = self.tau_diffusion_e / self.tau_discharge
    self.C_r_n = self.tau_r_n / self.tau_discharge
    self.C_r_p = self.tau_r_p / self.tau_discharge
    self.C_th = self.tau_th_yz / self.tau_discharge

    # Concentration ratios
    self.gamma_e = self.c_e_typ / self.c_n_max
    self.gamma_p = self.c_p_max / self.c_n_max

    # Macroscale Geometry
    self.l_cn = self.geo.l_cn
    self.l_n = self.geo.l_n
    self.l_s = self.geo.l_s
    self.l_p = self.geo.l_p
    self.l_cp = self.geo.l_cp
    self.l_x = self.geo.l_x
    self.l_y = self.geo.l_y
    self.l_z = self.geo.l_z
    self.a_cc = self.geo.a_cc
    self.a_cooling = self.geo.a_cooling
    self.v_cell = self.geo.v_cell
    self.l = self.geo.l
    self.delta = self.geo.delta

    # Tab geometry
    self.l_tab_n = self.geo.l_tab_n
    self.centre_y_tab_n = self.geo.centre_y_tab_n
    self.centre_z_tab_n = self.geo.centre_z_tab_n
    self.l_tab_p = self.geo.l_tab_p
    self.centre_y_tab_p = self.geo.centre_y_tab_p
    self.centre_z_tab_p = self.geo.centre_z_tab_p

    # Microscale geometry, see 'self._set_dimensional_parameters' for the
    # definition on the dimensional surface area to volume ratio based on
    # particle shape
    self.a_n = self.a_n_dim * self.R_n
    self.a_p = self.a_p_dim * self.R_p

    # Electrode Properties (conductivities scaled by thermal voltage / (i L))
    self.sigma_cn = (self.sigma_cn_dimensional * self.potential_scale /
                     self.i_typ / self.L_x)
    self.sigma_n = (self.sigma_n_dim * self.potential_scale /
                    self.i_typ / self.L_x)
    self.sigma_p = (self.sigma_p_dim * self.potential_scale /
                    self.i_typ / self.L_x)
    self.sigma_cp = (self.sigma_cp_dimensional * self.potential_scale /
                     self.i_typ / self.L_x)
    # "primed" conductivities absorb powers of the aspect ratio delta
    self.sigma_cn_prime = self.sigma_cn * self.delta**2
    self.sigma_n_prime = self.sigma_n * self.delta
    self.sigma_p_prime = self.sigma_p * self.delta
    self.sigma_cp_prime = self.sigma_cp * self.delta**2
    self.sigma_cn_dbl_prime = self.sigma_cn_prime * self.delta
    self.sigma_cp_dbl_prime = self.sigma_cp_prime * self.delta

    # Electrolyte Properties (no volume change from reactions here)
    self.beta_surf = pybamm.Scalar(0)
    self.beta_surf_n = pybamm.Scalar(0)
    self.beta_surf_p = pybamm.Scalar(0)

    # Electrochemical Reactions (double-layer capacitance groups)
    self.C_dl_n = (self.C_dl_n_dimensional * self.potential_scale /
                   self.j_scale_n / self.tau_discharge)
    self.C_dl_p = (self.C_dl_p_dimensional * self.potential_scale /
                   self.j_scale_p / self.tau_discharge)

    # Electrical (voltage cut-offs relative to the reference OCV difference)
    self.voltage_low_cut = (
        self.voltage_low_cut_dimensional -
        (self.U_p_ref - self.U_n_ref)) / self.potential_scale
    self.voltage_high_cut = (
        self.voltage_high_cut_dimensional -
        (self.U_p_ref - self.U_n_ref)) / self.potential_scale

    # Thermal
    self.rho_cn = self.therm.rho_cn
    self.rho_n = self.therm.rho_n
    self.rho_s = self.therm.rho_s
    self.rho_p = self.therm.rho_p
    self.rho_cp = self.therm.rho_cp
    self.rho_k = self.therm.rho_k
    self.rho = (self.rho_cn * self.l_cn + self.rho_n * self.l_n +
                self.rho_s * self.l_s + self.rho_p * self.l_p +
                self.rho_cp * self.l_cp
                ) / self.l  # effective volumetric heat capacity

    self.lambda_cn = self.therm.lambda_cn
    self.lambda_n = self.therm.lambda_n
    self.lambda_s = self.therm.lambda_s
    self.lambda_p = self.therm.lambda_p
    self.lambda_cp = self.therm.lambda_cp
    self.lambda_k = self.therm.lambda_k

    self.Theta = self.therm.Theta
    self.h_edge = self.therm.h_edge
    self.h_tab_n = self.therm.h_tab_n
    self.h_tab_p = self.therm.h_tab_p
    self.h_cn = self.therm.h_cn
    self.h_cp = self.therm.h_cp
    self.h_total = self.therm.h_total

    self.B = (self.i_typ * self.R * self.T_ref * self.tau_th_yz /
              (self.therm.rho_eff_dim * self.F * self.Delta_T * self.L_x))

    self.T_amb_dim = self.therm.T_amb_dim
    self.T_amb = self.therm.T_amb

    # SEI parameters
    self.C_sei_reaction_n = (self.j_scale_n / self.m_sei_dimensional
                             ) * pybamm.exp(-(self.F * self.U_n_ref /
                                              (2 * self.R * self.T_ref)))
    # NOTE(review): the positive-electrode group also uses U_n_ref (not
    # U_p_ref) — presumably intentional since SEI forms on the negative
    # electrode; confirm
    self.C_sei_reaction_p = (self.j_scale_p / self.m_sei_dimensional
                             ) * pybamm.exp(-(self.F * self.U_n_ref /
                                              (2 * self.R * self.T_ref)))
    self.C_sei_solvent_n = (
        self.j_scale_n * self.L_sei_0_dim /
        (self.c_sol_dimensional * self.F * self.D_sol_dimensional))
    self.C_sei_solvent_p = (
        self.j_scale_p * self.L_sei_0_dim /
        (self.c_sol_dimensional * self.F * self.D_sol_dimensional))
    self.C_sei_electron_n = (
        self.j_scale_n * self.F * self.L_sei_0_dim /
        (self.kappa_inner_dimensional * self.R * self.T_ref))
    self.C_sei_electron_p = (
        self.j_scale_p * self.F * self.L_sei_0_dim /
        (self.kappa_inner_dimensional * self.R * self.T_ref))
    self.C_sei_inter_n = (
        self.j_scale_n * self.L_sei_0_dim /
        (self.D_li_dimensional * self.c_li_0_dimensional * self.F))
    self.C_sei_inter_p = (
        self.j_scale_p * self.L_sei_0_dim /
        (self.D_li_dimensional * self.c_li_0_dimensional * self.F))
    self.U_inner_electron = (self.F * self.U_inner_dimensional / self.R /
                             self.T_ref)
    self.R_sei_n = (self.F * self.j_scale_n * self.R_sei_dimensional *
                    self.L_sei_0_dim / self.R / self.T_ref)
    self.R_sei_p = (self.F * self.j_scale_p * self.R_sei_dimensional *
                    self.L_sei_0_dim / self.R / self.T_ref)
    self.v_bar = self.V_bar_outer_dimensional / self.V_bar_inner_dimensional
    self.L_inner_0 = self.L_inner_0_dim / self.L_sei_0_dim
    self.L_outer_0 = self.L_outer_0_dim / self.L_sei_0_dim

    # ratio of SEI reaction scale to intercalation reaction
    self.Gamma_SEI_n = (self.V_bar_inner_dimensional * self.j_scale_n *
                        self.tau_discharge) / (self.F * self.L_sei_0_dim)
    self.Gamma_SEI_p = (self.V_bar_inner_dimensional * self.j_scale_p *
                        self.tau_discharge) / (self.F * self.L_sei_0_dim)

    # EC reaction
    self.C_ec_n = (self.L_sei_0_dim * self.j_scale_n /
                   (self.F * self.c_ec_0_dim * self.D_ec_dim))
    self.C_sei_ec_n = (self.F * self.k_sei_dim * self.c_ec_0_dim /
                       self.j_scale_n *
                       (pybamm.exp(-(self.F *
                                     (self.U_n_ref - self.U_sei_dim) /
                                     (2 * self.R * self.T_ref)))))
    self.beta_sei_n = self.a_n_dim * self.L_sei_0_dim * self.Gamma_SEI_n

    # Initial conditions
    self.epsilon_n_init = pybamm.Parameter("Negative electrode porosity")
    self.epsilon_s_init = pybamm.Parameter("Separator porosity")
    self.epsilon_p_init = pybamm.Parameter("Positive electrode porosity")
    self.epsilon_init = pybamm.Concatenation(self.epsilon_n, self.epsilon_s,
                                             self.epsilon_p)
    self.T_init = self.therm.T_init
    self.c_e_init = self.c_e_init_dimensional / self.c_e_typ
def _get_dj_ddeltaphi(self, variables):
    """Return the derivative of the interfacial current density with respect
    to the surface potential difference — identically zero for this submodel."""
    zero = pybamm.Scalar(0)
    return zero
def D_p(self, c_s_p, T):
    """Dimensionless positive particle diffusivity.

    The dimensional diffusivity at stoichiometry ``c_s_p`` and dimensionless
    temperature ``T`` is normalised by its value at full stoichiometry and the
    reference temperature.
    """
    stoichiometry = c_s_p
    # re-dimensionalise the temperature before calling the dimensional function
    dimensional_temperature = self.Delta_T * T + self.T_ref
    reference_diffusivity = self.D_p_dimensional(pybamm.Scalar(1), self.T_ref)
    return (self.D_p_dimensional(stoichiometry, dimensional_temperature)
            / reference_diffusivity)
def __init__(self, e_class):
    """
    Build and discretise a pybamm model of the current and the surface
    coverage variable ``theta`` for the experiment described by ``e_class``.

    Parameters
    ----------
    e_class : object
        Experiment wrapper; must provide ``dim_dict``,
        ``nd_param.nd_param_dict``, ``simulation_options`` (with a
        ``"method"`` key of ``"dcv"``, ``"sinusoidal"`` or ``"ramped"``),
        ``time_vec`` and ``optim_list``.

    Raises
    ------
    ValueError
        If ``simulation_options["method"]`` is not one of the three
        supported methods (previously this surfaced as a confusing
        ``NameError`` on ``E_t``).
    """
    self.dim_dict = e_class.dim_dict
    self.nd_dict = e_class.nd_param.nd_param_dict
    self.model = pybamm.BaseModel()
    self.parameter_dict = {}
    self.pybam_val_dict = {}
    self.simulation_options = e_class.simulation_options
    self.dim_keys = self.nd_dict.keys()
    self.time = e_class.time_vec
    # Every non-dimensional parameter becomes a pybamm input so it can be
    # changed at solve time without rebuilding the model.
    for key in self.dim_keys:
        self.parameter_dict[key] = pybamm.InputParameter(key)
        self.pybam_val_dict[key] = None
    self.current = pybamm.Variable("current")
    self.theta = pybamm.Variable("theta")

    p = self.parameter_dict

    def _dc_sweep():
        # Forward ramp until t = tr, then the mirrored backward ramp.
        Edc_forward = pybamm.t
        Edc_backwards = -(pybamm.t - 2 * p["tr"])
        return (p["E_start"]
                + (pybamm.t <= p["tr"]) * Edc_forward
                + (pybamm.t > p["tr"]) * Edc_backwards)

    def _ac_component():
        # Sinusoidal perturbation of amplitude d_E.
        return p["d_E"] * pybamm.sin(
            (p["nd_omega"] * pybamm.t) + p["phase"])

    method = self.simulation_options["method"]
    if method == "dcv":
        E_t = _dc_sweep()
    elif method == "sinusoidal":
        E_t = p["E_start"] + p["d_E"] + _ac_component()
    elif method == "ramped":
        E_t = _dc_sweep() + _ac_component()
    else:
        raise ValueError(
            "Unrecognised simulation method: {}".format(method))

    # Ohmic-drop-corrected potential and overpotential.
    Er = E_t - (p["Ru"] * self.current)
    ErE0 = Er - p["E_0"]
    alpha = p["alpha"]

    def _cdl_polynomial(base, e1, e2, e3):
        # Cubic potential-dependence of the double-layer capacitance.
        return p[base] * (1 + p[e1] * Er
                          + p[e2] * (Er**2)
                          + p[e3] * (Er**3))

    # NOTE: the original code also computed the "Cdl" polynomial once
    # unconditionally before this branch; that value was always overwritten
    # (dead store) and has been removed.
    if "Cdlinv" not in e_class.optim_list:
        Cdlp = _cdl_polynomial("Cdl", "CdlE1", "CdlE2", "CdlE3")
    else:
        # Separate capacitance polynomials for the forward (t <= tr) and
        # reverse (t > tr) sweeps.
        Cdlp = ((pybamm.t <= p["tr"])
                * _cdl_polynomial("Cdl", "CdlE1", "CdlE2", "CdlE3")
                + (pybamm.t > p["tr"])
                * _cdl_polynomial("Cdlinv", "CdlE1inv", "CdlE2inv",
                                  "CdlE3inv"))

    self.model.variables = {"current": self.current, "theta": self.theta}
    # Forward minus backward rate for the surface coverage.
    d_thetadt = ((1 - self.theta) * p["k_0"]
                 * pybamm.exp((1 - alpha) * ErE0)) - (
        self.theta * p["k_0"] * pybamm.exp((-alpha) * ErE0))
    # Current ODE: capacitive charging plus the Faradaic contribution,
    # scaled by the uncompensated resistance.
    dIdt = (E_t.diff(pybamm.t) - (self.current / Cdlp)
            + p["gamma"] * d_thetadt * (1 / Cdlp)) / p["Ru"]
    self.model.rhs = {self.current: dIdt, self.theta: d_thetadt}
    self.disc = pybamm.Discretisation()
    self.model.initial_conditions = {
        self.theta: pybamm.Scalar(1),
        self.current: pybamm.Scalar(0)
    }
    self.disc.process_model(self.model)
def test_process_model(self):
    """Parameter values are substituted into every part of a model.

    Builds a toy model with parameters a-d appearing in the rhs, algebraic
    equations, initial conditions, boundary conditions and variables,
    processes it with a ParameterValues object, and checks each parameter
    was replaced by the expected Scalar. Finally checks that a parameter
    with no supplied value raises KeyError.
    """
    model = pybamm.BaseModel()
    a = pybamm.Parameter("a")
    b = pybamm.Parameter("b")
    c = pybamm.Parameter("c")
    d = pybamm.Parameter("d")
    var1 = pybamm.Variable("var1")
    var2 = pybamm.Variable("var2")
    model.rhs = {var1: a * pybamm.grad(var1)}
    model.algebraic = {var2: c * var2}
    model.initial_conditions = {var1: b, var2: d}
    model.boundary_conditions = {
        var1: {
            "left": (c, "Dirichlet"),
            "right": (d, "Neumann")
        }
    }
    model.variables = {
        "var1": var1,
        "var2": var2,
        "grad_var1": pybamm.grad(var1),
        "d_var1": d * var1,
    }
    parameter_values = pybamm.ParameterValues({
        "a": 1,
        "b": 2,
        "c": 3,
        "d": 42
    })
    parameter_values.process_model(model)
    # rhs: "a" becomes Scalar(1); the gradient operator is left intact
    self.assertIsInstance(model.rhs[var1], pybamm.Multiplication)
    self.assertIsInstance(model.rhs[var1].children[0], pybamm.Scalar)
    self.assertIsInstance(model.rhs[var1].children[1], pybamm.Gradient)
    self.assertEqual(model.rhs[var1].children[0].value, 1)
    # algebraic: "c" becomes Scalar(3)
    self.assertIsInstance(model.algebraic[var2], pybamm.Multiplication)
    self.assertIsInstance(model.algebraic[var2].children[0], pybamm.Scalar)
    self.assertIsInstance(model.algebraic[var2].children[1], pybamm.Variable)
    self.assertEqual(model.algebraic[var2].children[0].value, 3)
    # initial conditions: "b" becomes Scalar(2)
    self.assertIsInstance(model.initial_conditions[var1], pybamm.Scalar)
    self.assertEqual(model.initial_conditions[var1].value, 2)
    # boundary conditions: keys are untouched, values replaced by Scalars
    bc_key = list(model.boundary_conditions.keys())[0]
    self.assertIsInstance(bc_key, pybamm.Variable)
    bc_value = list(model.boundary_conditions.values())[0]
    self.assertIsInstance(bc_value["left"][0], pybamm.Scalar)
    self.assertEqual(bc_value["left"][0].value, 3)
    self.assertIsInstance(bc_value["right"][0], pybamm.Scalar)
    self.assertEqual(bc_value["right"][0].value, 42)
    # variables: unchanged symbols keep their id; "d" becomes Scalar(42)
    self.assertEqual(model.variables["var1"].id, var1.id)
    self.assertIsInstance(model.variables["grad_var1"], pybamm.Gradient)
    self.assertTrue(
        isinstance(model.variables["grad_var1"].children[0],
                   pybamm.Variable))
    self.assertEqual(model.variables["d_var1"].id,
                     (pybamm.Scalar(42, name="d") * var1).id)
    self.assertIsInstance(model.variables["d_var1"].children[0],
                          pybamm.Scalar)
    self.assertTrue(
        isinstance(model.variables["d_var1"].children[1], pybamm.Variable))
    # bad boundary conditions: "x" has no supplied value, so processing fails
    model = pybamm.BaseModel()
    model.algebraic = {var1: var1}
    x = pybamm.Parameter("x")
    model.boundary_conditions = {var1: {"left": (x, "Dirichlet")}}
    with self.assertRaises(KeyError):
        parameter_values.process_model(model)
def test_x_average(self):
    """x_average over scalars, broadcasts, concatenations and domains.

    Checks: a bare Scalar averages to itself; a broadcast averages to its
    value; concatenations and domain-carrying symbols average to an
    Integral/Integral Division with empty domain; edge-evaluating symbols
    raise; particle-domain symbols are divided by the electrode thickness.
    """
    # averaging a scalar returns the same object
    a = pybamm.Scalar(1)
    average_a = pybamm.x_average(a)
    self.assertEqual(average_a.id, a.id)
    # averaging a broadcast of a scalar recovers the scalar value
    average_broad_a = pybamm.x_average(
        pybamm.PrimaryBroadcast(a, ["negative electrode"]))
    self.assertEqual(average_broad_a.evaluate(), np.array([1]))
    # averaging a concatenation of broadcasts gives a Division
    conc_broad = pybamm.Concatenation(
        pybamm.PrimaryBroadcast(1, ["negative electrode"]),
        pybamm.PrimaryBroadcast(2, ["separator"]),
        pybamm.PrimaryBroadcast(3, ["positive electrode"]),
    )
    average_conc_broad = pybamm.x_average(conc_broad)
    self.assertIsInstance(average_conc_broad, pybamm.Division)
    # standard electrode domains: integral over the symbol's own domain,
    # and the average itself has no domain
    for domain in [
            ["negative electrode"],
            ["separator"],
            ["positive electrode"],
            ["negative electrode", "separator", "positive electrode"],
    ]:
        a = pybamm.Symbol("a", domain=domain)
        x = pybamm.SpatialVariable("x", domain)
        av_a = pybamm.x_average(a)
        self.assertIsInstance(av_a, pybamm.Division)
        self.assertIsInstance(av_a.children[0], pybamm.Integral)
        self.assertEqual(av_a.children[0].integration_variable[0].domain,
                         x.domain)
        self.assertEqual(av_a.domain, [])
    # an unrecognised domain is normalised by the integral of ones
    a = pybamm.Symbol("a", domain="new domain")
    av_a = pybamm.x_average(a)
    self.assertEqual(av_a.domain, [])
    self.assertIsInstance(av_a, pybamm.Division)
    self.assertIsInstance(av_a.children[0], pybamm.Integral)
    self.assertEqual(av_a.children[0].integration_variable[0].domain,
                     a.domain)
    self.assertIsInstance(av_a.children[1], pybamm.Integral)
    self.assertEqual(av_a.children[1].integration_variable[0].domain,
                     a.domain)
    self.assertEqual(av_a.children[1].children[0].id,
                     pybamm.ones_like(a).id)
    # x-average of symbol that evaluates on edges raises error
    symbol_on_edges = pybamm.PrimaryBroadcastToEdges(1, "domain")
    with self.assertRaisesRegex(
            ValueError,
            "Can't take the x-average of a symbol that evaluates on edges"
    ):
        pybamm.x_average(symbol_on_edges)
    # Particle domains: the normalisation is the electrode thickness l_n/l_p
    a = pybamm.Symbol(
        "a",
        domain="negative particle",
        auxiliary_domains={"secondary": "negative electrode"},
    )
    av_a = pybamm.x_average(a)
    self.assertEqual(a.domain, ["negative particle"])
    self.assertIsInstance(av_a, pybamm.Division)
    self.assertIsInstance(av_a.children[0], pybamm.Integral)
    self.assertEqual(av_a.children[1].id,
                     pybamm.geometric_parameters.l_n.id)
    a = pybamm.Symbol(
        "a",
        domain="positive particle",
        auxiliary_domains={"secondary": "positive electrode"},
    )
    av_a = pybamm.x_average(a)
    self.assertEqual(a.domain, ["positive particle"])
    self.assertIsInstance(av_a, pybamm.Division)
    self.assertIsInstance(av_a.children[0], pybamm.Integral)
    self.assertEqual(av_a.children[1].id,
                     pybamm.geometric_parameters.l_p.id)
def test_model_solver_dae_nonsmooth_python(self):
    """DAE solve (python format) across an explicit discontinuity event.

    The model's rate and multiplier both switch at t = 0.6, declared via
    explicit DISCONTINUITY events. The solver is expected to stop an
    epsilon either side of the discontinuity and match the piecewise
    analytic solution, whether or not t_eval contains the discontinuity.
    """
    model = pybamm.BaseModel()
    model.convert_to_format = "python"
    whole_cell = ["negative electrode", "separator", "positive electrode"]
    var1 = pybamm.Variable("var1", domain=whole_cell)
    var2 = pybamm.Variable("var2", domain=whole_cell)
    discontinuity = 0.6

    # rate 0.2 before the discontinuity, 0.1 after
    def nonsmooth_rate(t):
        return 0.1 * (t < discontinuity) + 0.1

    # multiplier 2 before the discontinuity, 1 after
    def nonsmooth_mult(t):
        return (t < discontinuity) + 1.0

    rate = nonsmooth_rate(pybamm.t)
    mult = nonsmooth_mult(pybamm.t)
    # put in an extra heaviside with no time dependence, this should be
    # ignored by the solver i.e. no extra discontinuities added
    model.rhs = {var1: rate * var1 + (var1 < 0)}
    model.algebraic = {var2: mult * var1 - var2}
    model.initial_conditions = {var1: 1, var2: 2}
    model.events = [
        pybamm.Event("var1 = 1.5", pybamm.min(var1 - 1.5)),
        pybamm.Event("var2 = 2.5", pybamm.min(var2 - 2.5)),
        pybamm.Event(
            "nonsmooth rate",
            pybamm.Scalar(discontinuity),
            pybamm.EventType.DISCONTINUITY,
        ),
        pybamm.Event(
            "nonsmooth mult",
            pybamm.Scalar(discontinuity),
            pybamm.EventType.DISCONTINUITY,
        ),
    ]
    disc = get_discretisation_for_testing()
    disc.process_model(model)
    # Solve
    solver = pybamm.ScikitsDaeSolver(rtol=1e-8, atol=1e-8,
                                     root_method="lm")
    # create two time series, one without a time point on the
    # discontinuity, and one with
    t_eval1 = np.linspace(0, 5, 10)
    t_eval2 = np.insert(t_eval1,
                        np.searchsorted(t_eval1, discontinuity),
                        discontinuity)
    solution1 = solver.solve(model, t_eval1)
    solution2 = solver.solve(model, t_eval2)
    # check time vectors
    for solution in [solution1, solution2]:
        # time vectors are ordered
        self.assertTrue(np.all(solution.t[:-1] <= solution.t[1:]))
        # time value before and after discontinuity is an epsilon away
        dindex = np.searchsorted(solution.t, discontinuity)
        value_before = solution.t[dindex - 1]
        value_after = solution.t[dindex]
        self.assertEqual(value_before + sys.float_info.epsilon,
                         discontinuity)
        self.assertEqual(value_after - sys.float_info.epsilon,
                         discontinuity)
    # both solution time vectors should have same number of points
    self.assertEqual(len(solution1.t), len(solution2.t))
    # check solution against the piecewise-exponential analytic answer
    for solution in [solution1, solution2]:
        np.testing.assert_array_less(solution.y[0, :-1], 1.5)
        np.testing.assert_array_less(solution.y[-1, :-1], 2.5)
        var1_soln = np.exp(0.2 * solution.t)
        y0 = np.exp(0.2 * discontinuity)
        var1_soln[solution.t > discontinuity] = y0 * np.exp(
            0.1 * (solution.t[solution.t > discontinuity] - discontinuity))
        var2_soln = 2 * var1_soln
        var2_soln[solution.t > discontinuity] = var1_soln[
            solution.t > discontinuity]
        np.testing.assert_allclose(solution.y[0], var1_soln, rtol=1e-06)
        np.testing.assert_allclose(solution.y[-1], var2_soln, rtol=1e-06)
def test_scalar_id(self):
    """Scalars with equal values share an id; different values do not."""
    first = pybamm.Scalar(4)
    second = pybamm.Scalar(4)
    third = pybamm.Scalar(5)
    self.assertEqual(first.id, second.id)
    self.assertNotEqual(first.id, third.id)
def test_model_solver_dae_nonsmooth(self):
    """Three equivalent ways of declaring a DAE discontinuity all solve alike.

    The same piecewise rate (switching at t = 0.6) is expressed via an
    explicit DISCONTINUITY event, and via two heaviside forms that the
    solver should detect implicitly. Each model must stop an epsilon either
    side of the discontinuity and match the analytic solution.
    """
    whole_cell = ["negative electrode", "separator", "positive electrode"]
    var1 = pybamm.Variable("var1", domain=whole_cell)
    var2 = pybamm.Variable("var2")
    discontinuity = 0.6

    # Create three different models with the same solution, each
    # expressing the discontinuity in a different way

    # first model explicitly adds a discontinuity event
    def nonsmooth_rate(t):
        return 0.1 * (t < discontinuity) + 0.1

    rate = pybamm.Function(nonsmooth_rate, pybamm.t)
    model1 = pybamm.BaseModel()
    model1.rhs = {var1: rate * var1}
    model1.algebraic = {var2: var2}
    model1.initial_conditions = {var1: 1, var2: 0}
    model1.events = [
        pybamm.Event("var1 = 1.5", pybamm.min(var1 - 1.5)),
        pybamm.Event(
            "nonsmooth rate",
            pybamm.Scalar(discontinuity),
            pybamm.EventType.DISCONTINUITY,
        ),
    ]
    # second model implicitly adds a discontinuity event via a heaviside
    # function
    model2 = pybamm.BaseModel()
    model2.rhs = {var1: (0.1 * (pybamm.t < discontinuity) + 0.1) * var1}
    model2.algebraic = {var2: var2}
    model2.initial_conditions = {var1: 1, var2: 0}
    model2.events = [pybamm.Event("var1 = 1.5", pybamm.min(var1 - 1.5))]
    # third model implicitly adds a discontinuity event via another
    # heaviside function
    model3 = pybamm.BaseModel()
    model3.rhs = {var1: (-0.1 * (discontinuity < pybamm.t) + 0.2) * var1}
    model3.algebraic = {var2: var2}
    model3.initial_conditions = {var1: 1, var2: 0}
    model3.events = [pybamm.Event("var1 = 1.5", pybamm.min(var1 - 1.5))]
    for model in [model1, model2, model3]:
        disc = get_discretisation_for_testing()
        disc.process_model(model)
        # Solve
        solver = pybamm.ScikitsDaeSolver(rtol=1e-8, atol=1e-8)
        # create two time series, one without a time point on the
        # discontinuity, and one with
        t_eval1 = np.linspace(0, 5, 10)
        t_eval2 = np.insert(t_eval1,
                            np.searchsorted(t_eval1, discontinuity),
                            discontinuity)
        solution1 = solver.solve(model, t_eval1)
        solution2 = solver.solve(model, t_eval2)
        # check time vectors
        for solution in [solution1, solution2]:
            # time vectors are ordered
            self.assertTrue(np.all(solution.t[:-1] <= solution.t[1:]))
            # time value before and after discontinuity is an epsilon away
            dindex = np.searchsorted(solution.t, discontinuity)
            value_before = solution.t[dindex - 1]
            value_after = solution.t[dindex]
            self.assertEqual(value_before + sys.float_info.epsilon,
                             discontinuity)
            self.assertEqual(value_after - sys.float_info.epsilon,
                             discontinuity)
        # both solution time vectors should have same number of points
        self.assertEqual(len(solution1.t), len(solution2.t))
        # check solution: terminated by the var1 = 1.5 event, and matches
        # the piecewise-exponential analytic answer
        for solution in [solution1, solution2]:
            np.testing.assert_array_less(solution.y[0, :-1], 1.5)
            np.testing.assert_array_less(solution.y[-1, :-1], 2.5)
            np.testing.assert_equal(solution.t_event[0], solution.t[-1])
            np.testing.assert_array_equal(solution.y_event[:, 0],
                                          solution.y[:, -1])
            var1_soln = np.exp(0.2 * solution.t)
            y0 = np.exp(0.2 * discontinuity)
            var1_soln[solution.t > discontinuity] = y0 * np.exp(
                0.1 * (solution.t[solution.t > discontinuity]
                       - discontinuity))
            np.testing.assert_allclose(solution.y[0], var1_soln,
                                       rtol=1e-06)
def test_to_equation(self):
    """A Scalar converts to an equation that renders as its float value."""
    scalar = pybamm.Scalar(3)
    equation = scalar.to_equation()
    self.assertEqual(str(equation), "3.0")
def test_1D_different_domains(self):
    """ProcessedSymbolicVariable works on any 1D domain, but rejects 2D.

    Construction succeeds for electrode, particle, current-collector and a
    user-defined 1D domain; a 2D (particle x electrode) variable raises
    NotImplementedError.
    """
    # Negative electrode domain
    var = pybamm.Variable("var", domain=["negative electrode"])
    x = pybamm.SpatialVariable("x", domain=["negative electrode"])
    disc = tests.get_discretisation_for_testing()
    disc.set_variable_slices([var])
    x_sol = disc.process_symbol(x).entries[:, 0]
    var_sol = disc.process_symbol(var)
    t_sol = [0]
    # constant state vector (value 5 at every node) for a single time point
    y_sol = np.ones_like(x_sol)[:, np.newaxis] * 5
    sol = pybamm.Solution(t_sol, y_sol)
    pybamm.ProcessedSymbolicVariable(var_sol, sol)
    # Particle domain
    var = pybamm.Variable("var", domain=["negative particle"])
    r = pybamm.SpatialVariable("r", domain=["negative particle"])
    disc = tests.get_discretisation_for_testing()
    disc.set_variable_slices([var])
    r_sol = disc.process_symbol(r).entries[:, 0]
    var_sol = disc.process_symbol(var)
    t_sol = [0]
    y_sol = np.ones_like(r_sol)[:, np.newaxis] * 5
    sol = pybamm.Solution(t_sol, y_sol)
    pybamm.ProcessedSymbolicVariable(var_sol, sol)
    # Current collector domain
    var = pybamm.Variable("var", domain=["current collector"])
    z = pybamm.SpatialVariable("z", domain=["current collector"])
    disc = tests.get_1p1d_discretisation_for_testing()
    disc.set_variable_slices([var])
    z_sol = disc.process_symbol(z).entries[:, 0]
    var_sol = disc.process_symbol(var)
    t_sol = [0]
    y_sol = np.ones_like(z_sol)[:, np.newaxis] * 5
    sol = pybamm.Solution(t_sol, y_sol)
    pybamm.ProcessedSymbolicVariable(var_sol, sol)
    # Other domain: build a mesh/discretisation for a custom "line" domain
    var = pybamm.Variable("var", domain=["line"])
    x = pybamm.SpatialVariable("x", domain=["line"])
    geometry = pybamm.Geometry(
        {
            "line": {
                "primary": {
                    x: {"min": pybamm.Scalar(0), "max": pybamm.Scalar(1)}
                }
            }
        }
    )
    submesh_types = {"line": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh)}
    var_pts = {x: 10}
    mesh = pybamm.Mesh(geometry, submesh_types, var_pts)
    disc = pybamm.Discretisation(mesh, {"line": pybamm.FiniteVolume()})
    disc.set_variable_slices([var])
    x_sol = disc.process_symbol(x).entries[:, 0]
    var_sol = disc.process_symbol(var)
    t_sol = [0]
    y_sol = np.ones_like(x_sol)[:, np.newaxis] * 5
    sol = pybamm.Solution(t_sol, y_sol)
    pybamm.ProcessedSymbolicVariable(var_sol, sol)
    # 2D fails
    var = pybamm.Variable(
        "var",
        domain=["negative particle"],
        auxiliary_domains={"secondary": "negative electrode"},
    )
    r = pybamm.SpatialVariable(
        "r",
        domain=["negative particle"],
        auxiliary_domains={"secondary": "negative electrode"},
    )
    disc = tests.get_p2d_discretisation_for_testing()
    disc.set_variable_slices([var])
    r_sol = disc.process_symbol(r).entries[:, 0]
    var_sol = disc.process_symbol(var)
    t_sol = [0]
    y_sol = np.ones_like(r_sol)[:, np.newaxis] * 5
    sol = pybamm.Solution(t_sol, y_sol)
    with self.assertRaisesRegex(NotImplementedError,
                                "Shape not recognized"):
        pybamm.ProcessedSymbolicVariable(var_sol, sol)
def test_simplify_heaviside(self):
    """Heaviside comparisons between constants simplify to scalar 1 or 0."""
    one = pybamm.Scalar(1)
    two = pybamm.Scalar(2)
    lt_simplified = (one < two).simplify()
    ge_simplified = (one >= two).simplify()
    self.assertEqual(lt_simplified.id, pybamm.Scalar(1).id)
    self.assertEqual(ge_simplified.id, pybamm.Scalar(0).id)