def stddev_ensemble(self, ddof=0): """ Generate a single Results object with a Trajectory that is made of the sample standard deviations of all trajectories' outputs. :param ddof: Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of trajectories. Sample standard deviation uses ddof of 1. Defaults to population standard deviation where ddof is 0. :type ddof: int :returns: the Results object """ from math import sqrt trajectory_list = self.data number_of_trajectories = len(trajectory_list) if ddof == number_of_trajectories: from gillespy2.core import log log.warning( "ddof must be less than the number of trajectories. Using ddof of 0" ) ddof = 0 average_list = self.average_ensemble().data[0] output_trajectory = Trajectory( data={}, model=trajectory_list[0].model, solver_name=trajectory_list[0].solver_name) for species in trajectory_list[ 0]: # Initialize the output to be the same size as the inputs output_trajectory[species] = [0] * len(trajectory_list[0][species]) output_trajectory['time'] = trajectory_list[0]['time'] for i in range(0, number_of_trajectories): trajectory_dict = trajectory_list[i] for species in trajectory_dict: if species == 'time': continue for k in range(0, len(output_trajectory['time'])): output_trajectory[species][k] += (trajectory_dict[species][k] - average_list[species][k])\ * (trajectory_dict[species][k] - average_list[species][k]) for species in output_trajectory: # Divide for mean of every value in output Trajectory if species == 'time': continue for i in range(0, len(output_trajectory[species])): output_trajectory[species][i] /= (number_of_trajectories - ddof) output_trajectory[species][i] = sqrt( output_trajectory[species][i]) output_results = Results(data=[ output_trajectory ]) # package output_trajectory in a Results object return output_results
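# Illustrative usage sketch for stddev_ensemble (not part of the library source). It assumes a
# small user-defined decay model; ddof=1 gives the sample standard deviation, while the default
# ddof=0 gives the population standard deviation.
def _example_stddev_ensemble():
    import gillespy2

    model = gillespy2.Model(name="decay")
    A = gillespy2.Species(name="A", initial_value=100)
    k = gillespy2.Parameter(name="k", expression=0.05)
    model.add_species([A])
    model.add_parameter([k])
    model.add_reaction([gillespy2.Reaction(name="decay_A", reactants={A: 1}, products={}, rate=k)])

    results = model.run(number_of_trajectories=25)   # ensemble of 25 stochastic trajectories
    std_traj = results.stddev_ensemble(ddof=1)       # sample standard deviation per time point
    return std_traj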
def valid_graph_params(live_output_options):
    if live_output_options['type'] not in ['progress', 'graph', 'text']:
        from gillespy2.core.gillespyError import SimulationError
        raise SimulationError(
            "Invalid input to 'live_output', please check spelling and ensure input is"
            " lower case.")

    if 'interval' not in live_output_options:
        live_output_options['interval'] = 1
    elif live_output_options['interval'] < 0:
        log.warning(
            "In LiveGraphing live_output_options, got \"interval\" = \"{0}\". setting interval = 1"
            .format(live_output_options['interval']))
        live_output_options['interval'] = 1

    if live_output_options['type'] == "graph" and live_output_options['interval'] < 1:
        log.warning(
            "In LiveGraphing live_output_options, got \"interval\" = \"{0}\". Consider using an interval >= 1 "
            "when displaying graphs".format(live_output_options['interval']))

    if 'clear_output' not in live_output_options:
        if live_output_options['type'] == "graph" or live_output_options['type'] == "progress":
            live_output_options['clear_output'] = True
        else:
            live_output_options['clear_output'] = False

    if 'file_path' not in live_output_options:
        live_output_options['file_path'] = None
    elif live_output_options['type'] == "graph" and live_output_options['file_path'] is not None:
        live_output_options['type'] = "figure"
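# Illustrative sketch (not part of the library source): valid_graph_params mutates the options
# dict in place and fills in defaults. For example:
#
#     opts = {"type": "graph"}
#     valid_graph_params(opts)
#     # opts is now {"type": "graph", "interval": 1, "clear_output": True, "file_path": None}
#
# Supplying a "file_path" together with type "graph" silently switches the type to "figure".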
def build_from_solver_results(cls, solver, live_output_options):
    """
    Build a gillespy2.Results object using the provided solver results.

    :param solver: The solver used to run the simulation.
    :type solver: gillespy2.GillesPySolver

    :param live_output_options: Dictionary containing options for live_output. By default {"interval":1}.
        "interval" specifies seconds between displays. "clear_output" specifies whether the display
        should be refreshed with each display.
    :type live_output_options: dict
    """
    if solver.rc == 33:
        from gillespy2.core import log
        log.warning('GillesPy2 simulation exceeded timeout.')
    if hasattr(solver.result[0], 'shape'):
        return solver.result
    if len(solver.result) > 0:
        results_list = []
        for i in range(0, len(solver.result)):
            temp = Trajectory(data=solver.result[i], model=solver.model,
                              solver_name=solver.name, rc=solver.rc)
            results_list.append(temp)

        results = Results(results_list)
        if "type" in live_output_options.keys() and live_output_options['type'] == "graph":
            results.plot()
        return results
    else:
        raise ValueError("number_of_trajectories must be positive (greater than zero).")
def __compile(self):
    # Use makefile.
    if self.resume:
        if self.resume[0].model != self.model:
            raise gillespyError.ModelError('When resuming, one must not alter the model being resumed.')
        else:
            built = subprocess.run(["make", "-C", self.output_directory, 'UserSimulation'],
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    else:
        try:
            cleaned = subprocess.run(["make", "-C", self.output_directory, 'cleanSimulation'],
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            built = subprocess.run(["make", "-C", self.output_directory, 'UserSimulation'],
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except KeyboardInterrupt:
            log.warning("Solver has been interrupted during compile time, unexpected behavior may occur.")

    if built.returncode == 0:
        self.__compiled = True
    else:
        raise gillespyError.BuildError(
            "Error encountered while compiling file:\nReturn code: "
            "{0}.\nError:\n{1}\n{2}\n".format(built.returncode,
                                              built.stdout.decode('utf-8'),
                                              built.stderr.decode('utf-8')))
def __execute(self, target: str, **kwargs):
    # Default make arguments.
    args_dict = {
        "cbase_dir": str(self.cbase_dir.resolve()),
        "obj_dir": str(self.obj_dir.resolve()),
        "output_dir": str(self.output_dir.resolve()),
        "output_file": str(self.output_dir.joinpath(self.output_file).resolve())
    }

    # Overwrite keys supplied in **kwargs.
    for key, value in kwargs.items():
        args_dict[key] = value

    # Create the make arguments. Note, arguments are UPPERCASE.
    make_args = [(f"{key.upper()}={value}") for key, value in args_dict.items()]

    # Create the make command.
    make_cmd = ["make", "-C", str(self.cbase_dir), "-f", str(self.makefile), target] + make_args

    try:
        result = subprocess.run(make_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except KeyboardInterrupt:
        log.warning(f"Makefile was interrupted during execution of target: '{target}', "
                    "unexpected behavior may occur.")

    if result.returncode == 0:
        return

    raise gillespyError.BuildError(f"Error encountered during execution of Makefile target: '{target}'.\n"
                                   f"Return code: {result.returncode}\n"
                                   f"- stdout: {result.stdout.decode('utf-8', errors='ignore')}\n"
                                   f"- stderr: {result.stderr.decode('utf-8', errors='ignore')}\n"
                                   f"- make_cmd: {make_cmd}\n"
                                   f"- os.listdir({os.path.join(self.cbase_dir, 'template')}): "
                                   f"{os.listdir(os.path.join(self.cbase_dir, 'template'))}\n")
def run(self, model, t=20, number_of_trajectories=1, increment=0.05, show_labels=True, integrator='lsoda', integrator_options={}, timeout=None, **kwargs): """ :param model: gillespy2.model class object :param t: end time of simulation :param number_of_trajectories: Should be 1. This is deterministic and will always have same results :param increment: time step increment for plotting :param show_labels: If true, simulation returns a list of trajectories, where each list entry is a dictionary containing key value pairs of species : trajectory. If false, returns a numpy array with shape [traj_no, time, species] :param integrator: integrator to be used form scipy.integrate.ode. Options include 'vode', 'zvode', 'lsoda', 'dopri5', and 'dop835'. For more details, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html :param integrator_options: a dictionary containing options to the scipy integrator. for a list of options, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html. Example use: {max_step : 0, rtol : .01} :param kwargs: :return: """ if isinstance(self, type): self = BasicODESolver() self.stop_event = Event() if timeout is not None and timeout <= 0: timeout = None if len(kwargs) > 0: for key in kwargs: log.warning( 'Unsupported keyword argument to {0} solver: {1}'.format( self.name, key)) if number_of_trajectories > 1: log.warning( "Generating duplicate trajectories for model with ODE Solver. Consider running with only 1 trajectory." ) sim_thread = Thread(target=self.___run, args=(model, ), kwargs={ 't': t, 'number_of_trajectories': number_of_trajectories, 'increment': increment, 'show_labels': show_labels, 'timeout': timeout }) try: sim_thread.start() sim_thread.join(timeout=timeout) self.stop_event.set() while self.result is None: pass except: pass if hasattr(self, 'has_raised_exception'): raise self.has_raised_exception return self.result, self.rc
def __getattribute__(self, key):
    if key in ('model', 'solver_name', 'rc', 'status'):
        if len(self.data) > 1:
            from gillespy2.core import log
            msg = f"Results is of type list. Use results[i]['{key}'] instead of results['{key}']"
            log.warning(msg)
        return getattr(Results.__getattribute__(self, key='data')[0], key)
    else:
        return UserList.__getattribute__(self, key)
def _validate_kwargs(self, **kwargs):
    """
    Validate any additional kwargs passed to the model. If any exist, warn the user.
    """
    if len(kwargs) == 0:
        return

    for key, val in kwargs.items():
        log.warning(f"Unsupported keyword argument for solver {self.name}: {key}")
def __getitem__(self, key):
    if key == 'data':
        return UserList.__getitem__(self, key)
    if isinstance(key, str):
        if len(self.data) > 1:
            from gillespy2.core import log
            msg = f"Results is of type list. Use results[i]['{key}'] instead of results['{key}']"
            log.warning(msg)
        return self.data[0][key]
    else:
        return UserList.__getitem__(self, key)
    raise KeyError(key)
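# Illustrative sketch (not part of the library source): indexing a Results object. Integer
# indices return individual Trajectory objects; string keys fall through to the first
# trajectory, with a warning when more than one trajectory is present. Assumes `model` is a
# gillespy2.Model containing a species named 'A'.
#
#     results = model.run(number_of_trajectories=3)
#     first = results[0]           # Trajectory object for the first run
#     a_counts = results[0]['A']   # species 'A' from the first trajectory
#     results['A']                 # works, but warns: use results[i]['A'] for ensembles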
def create_mass_action(self, *args, **kwargs):
    """
    Initializes the mass action propensity function given self.reactants and a single parameter value.
    """
    from gillespy2.core import log
    log.warning(
        """
        Reaction.create_mass_action has been deprecated. Future releases of GillesPy2 may
        not support this feature.
        """
    )

    self._create_mass_action(*args, **kwargs)
def verify(self, *args, **kwargs):
    """
    Check if the reaction is properly formatted. Does nothing on success, raises an error on failure.
    """
    from gillespy2.core import log
    log.warning(
        """
        Reaction.verify has been deprecated. Future releases of GillesPy2 may not support
        this feature. Use Reaction.validate instead.
        """
    )

    self.validate(*args, **kwargs)
def run(cls, model, t=20, number_of_trajectories=1, increment=0.05, seed=None, debug=False,
        profile=False, show_labels=True, max_steps=0, **kwargs):
    """
    :param model: gillespy2.model class object
    :param t: end time of simulation
    :param number_of_trajectories: Should be 1. This solver is deterministic and will always produce the same results.
    :param increment: time step increment for plotting
    :param seed: random seed, has no effect
    :param debug: not implemented
    :param profile: not implemented
    :param show_labels: If True, returns a list of dictionaries mapping species names to trajectories.
        If False, returns a numpy array with shape [traj_no, time, species].
    :param max_steps: maximum number of internally defined integration steps allowed per point in t.
        Defaults to 0 (unlimited); passed through to scipy's odeint as mxstep.
    :param kwargs:
    :return:
    """
    if number_of_trajectories > 1:
        log.warning("Generating duplicate trajectories for model with ODE Solver. "
                    "Consider running with only 1 trajectory.")

    start_state = [model.listOfSpecies[species].initial_value for species in model.listOfSpecies]
    # np.linspace expects an integer number of sample points.
    timeline = np.linspace(0, t, int(round(t / increment + 1)))
    result = odeint(BasicODESolver.rhs, start_state, timeline, args=(model,), mxstep=max_steps)

    if show_labels:
        results_as_dict = {'time': timeline}
        for i, species in enumerate(model.listOfSpecies):
            results_as_dict[species] = result[:, i]
        results = [results_as_dict] * number_of_trajectories
    else:
        result = np.hstack((np.expand_dims(timeline, -1), result))
        results = np.stack([result] * number_of_trajectories, axis=0)

    return results
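# Illustrative sketch (not part of the library source): running the deterministic ODE solver.
# Assumes `model` is a user-defined gillespy2.Model. Because the solver is deterministic,
# requesting more than one trajectory only duplicates the same result (and triggers the warning
# above), so a single trajectory is sufficient.
#
#     from gillespy2 import BasicODESolver
#     results = model.run(solver=BasicODESolver, t=100, increment=0.1)
#     results.plot()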
def Annotate(self, *args, **kwargs):
    """
    Add an annotation to this reaction (deprecated).

    :param annotation: Annotation note to be added to reaction
    :type annotation: str
    """
    from gillespy2.core import log
    log.warning(
        """
        Reaction.Annotate has been deprecated. Future releases of GillesPy2 may not support
        this feature. Use Reaction.set_annotation instead.
        """
    )

    self.set_annotation(*args, **kwargs)
def __getitem__(self, key):
    if type(key) is int:
        from gillespy2.core import log
        species = list(self.data.keys())[key]
        msg = "Trajectory is of type dictionary. "
        msg += f"Use trajectory['{species}'] instead of trajectory[{key}]. "
        msg += f"Retrieving trajectory['{species}']."
        log.warning(msg)
        return self.data[species]
    if key in self.data:
        return self.data[key]
    if hasattr(self.__class__, "__missing__"):
        return self.__class__.__missing__(self, key)
    raise KeyError(key)
def c_solver_resume(timeStopped, simulation_data, t, resume=None): """ If a simulation is being resumed from a previous simulation, this function is called in the VariableSSACSolver, or SSACSolver :param timeStopped: The time that a simulation was stopped, originally attained from the results_buffer returned by the CPP simulation. :param simulation_data: The current simulation data, attained after parsing the results in the VariableSSACSolver or SSACSolver. :param t: The end time for the resume simulation, originally set in model.run(t=...) :param resume: The previous simulations data :type resume: gillespy2.core.result object :return: Combined data of the previous simulation, and the current simulation """ # If simulation was paused/KeyboardInterrupt if timeStopped != 0: cutoff = find_time(simulation_data[0]['time'], timeStopped) if cutoff == 0 or cutoff == 1: log.warning( 'You have paused the simulation too early, and no points have been calculated past' ' initial values. A graphic display will not produce expected results.' ) else: cutoff -= 1 for i in simulation_data[0]: simulation_data[0][i] = simulation_data[0][i][:cutoff] if resume is not None: resumeTime = float(resume['time'][-1]) step = resumeTime - resume['time'][-2] if timeStopped == 0: timeSpan = np.arange(resumeTime, t + resumeTime + step, step) else: timeSpan = np.arange(resumeTime + step, timeStopped + resumeTime + step, step) simulation_data[0]['time'] = timeSpan if resume is not None: # If resuming, combine old pause with new data, and delete any excess null data for i in simulation_data[0]: oldData = resume[i] newData = simulation_data[0][i] simulation_data[0][i] = np.concatenate((oldData, newData), axis=None) if len(simulation_data[0]['time']) != len(simulation_data[0][i]): simulation_data[0]['time'] = simulation_data[0]['time'][:-1] return simulation_data
def __add__(self, other):
    combined_data = Results(data=(self.data + other.data))
    consistent_solver = combined_data._validate_solver()
    consistent_model = combined_data._validate_model()

    if not consistent_solver:
        from gillespy2.core import log
        log.warning("Results objects contain Trajectory objects from multiple solvers.")

    if not consistent_model:
        raise ValidationError('Results objects contain Trajectory objects from multiple models.')

    return combined_data
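# Illustrative sketch (not part of the library source): Results objects from the same model can
# be concatenated with `+`, e.g. to pool trajectories from two separate runs. Combining results
# produced by different solvers only warns; combining results from different models raises a
# ValidationError. Assumes `model` is a gillespy2.Model.
#
#     batch_1 = model.run(number_of_trajectories=10)
#     batch_2 = model.run(number_of_trajectories=10)
#     pooled = batch_1 + batch_2    # Results with 20 trajectories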
def addReactant(self, *args, **kwargs):
    """
    Add a reactant to this reaction (deprecated).

    :param species: reactant Species object
    :type species: gillespy2.core.species.Species

    :param stoichiometry: Stoichiometry of this participant reactant
    :type stoichiometry: int
    """
    from gillespy2.core import log
    log.warning(
        """
        Reaction.addReactant has been deprecated. Future releases of GillesPy2 may not
        support this feature. Use Reaction.add_reactant instead.
        """
    )

    self.add_reactant(*args, **kwargs)
def addProduct(self, *args, **kwargs):
    """
    Add a product to this reaction (deprecated).

    :param species: Species object to be produced by the reaction
    :type species: gillespy2.core.species.Species

    :param stoichiometry: Stoichiometry of this product.
    :type stoichiometry: int
    """
    from gillespy2.core import log
    log.warning(
        """
        Reaction.addProduct has been deprecated. Future releases of GillesPy2 may not
        support this feature. Use Reaction.add_product instead.
        """
    )

    self.add_product(*args, **kwargs)
def setType(self, rxntype):
    """
    Sets reaction type to either "mass-action" or "customized" (deprecated).

    :param rxntype: Either "mass-action" or "customized"
    :type rxntype: str
    """
    from gillespy2.core import log
    log.warning(
        """
        Reaction.setType has been deprecated. Future releases of GillesPy2 may not support
        this feature. Set propensity_function for Reaction.type="customized" or rate for
        Reaction.type="mass-action" instead.
        """
    )

    if rxntype.lower() not in ('mass-action', 'customized'):
        raise ReactionError("Invalid reaction type.")
    self.type = rxntype.lower()

    self.massaction = False if self.type == 'customized' else True
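# Illustrative sketch (not part of the library source): the deprecated wrappers above simply
# delegate to the current Reaction API. Preferred spellings, assuming `reaction` is a
# gillespy2.Reaction and `A` is a gillespy2.Species:
#
#     reaction.add_reactant(A, 1)        # instead of reaction.addReactant(A, 1)
#     reaction.add_product(A, 2)         # instead of reaction.addProduct(A, 2)
#     reaction.set_annotation("note")    # instead of reaction.Annotate("note")
#     reaction.validate()                # instead of reaction.verify()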
def run(self, model, t=20, number_of_trajectories=1, increment=0.05, seed=None, debug=False, show_labels=True, timeout=None, **kwargs): """ Run the SSA algorithm using a NumPy for storing the data in arrays and generating the timeline. :param model: The model on which the solver will operate. :param t: The end time of the solver. :param number_of_trajectories: The number of times to sample the chemical master equation. Each trajectory will be returned at the end of the simulation. :param increment: The time step of the solution. :param seed: The random seed for the simulation. Defaults to None. :param debug: Set to True to provide additional debug information about the simulation. :param show_labels: Use names of species as index of result object rather than position numbers. :return: a list of each trajectory simulated. """ if isinstance(self, type): self = NumPySSASolver() self.stop_event = Event() if timeout is not None and timeout <= 0: timeout = None if len(kwargs) > 0: for key in kwargs: log.warning('Unsupported keyword argument to {0} solver: {1}'.format(self.name, key)) sim_thread = Thread(target=self.___run, args=(model,), kwargs={'t':t, 'number_of_trajectories':number_of_trajectories, 'increment':increment, 'seed':seed, 'debug':debug, 'show_labels':show_labels, 'timeout':timeout}) try: sim_thread.start() sim_thread.join(timeout=timeout) self.stop_event.set() while self.result is None: pass except: pass if hasattr(self, 'has_raised_exception'): raise self.has_raised_exception return self.result, self.rc
def use_rate_rule(self, rate_rule: "RateRule") -> "SanitizedModel":
    """
    Attach the given rate rule to the sanitized model.
    The rate rule will automatically be validated and sanitized before being applied.

    :param rate_rule: GillesPy2 RateRule object to attach to the sanitized model.
    :type rate_rule: gillespy2.RateRule

    :returns: Pass-through of sanitized model object.
    :rtype: SanitizedModel
    """
    if isinstance(rate_rule.variable, Species):
        variable = rate_rule.variable
    elif not isinstance(rate_rule.variable, Parameter) and \
            rate_rule.variable in self.model.listOfSpecies.keys():
        variable = self.model.get_species(rate_rule.variable)
    else:
        errmsg = """
        Parameters are not valid variables for the TauHybridCSolver.

        In order to use this variable it will need to be a gillespy2.Species.
        """
        raise SimulationError(errmsg)

    if variable.name in self.species_names:
        sanitized_name = self.species_names.get(variable.name)
        if sanitized_name in self.rate_rules:
            log.warning(f"Duplicate rate rule variable found in C++ solver: {variable}")
        rr_sanitized = self.expr.getexpr_cpp(rate_rule.formula)
        if rr_sanitized is not None:
            self.rate_rules[sanitized_name] = rr_sanitized
        else:
            log.warning(f"Could not sanitize rate rule formula expression: {rate_rule.formula}")
    return self
def valid_graph_params(live_output_options):
    if 'interval' not in live_output_options:
        live_output_options['interval'] = 1
    elif live_output_options['interval'] < 0:
        log.warning(
            "In LiveGraphing live_output_options, got \"interval\" = \"{0}\". setting interval = 1"
            .format(live_output_options['interval']))
        live_output_options['interval'] = 1

    if live_output_options['type'] == "graph" and live_output_options['interval'] < 1:
        log.warning(
            "In LiveGraphing live_output_options, got \"interval\" = \"{0}\". Consider using an interval >= 1 "
            "when displaying graphs".format(live_output_options['interval']))

    if 'clear_output' not in live_output_options:
        if live_output_options['type'] == "graph" or live_output_options['type'] == "progress":
            live_output_options['clear_output'] = True
        else:
            live_output_options['clear_output'] = False
def _make_resume_data(self, time_stopped: int, simulation_data: numpy.ndarray, t: int):
    """
    If the simulation was paused then the output data needs to be trimmed to allow for resume.
    In the event the simulation was not paused, no data is changed.
    """
    # No need to create resume data if the simulation completed without interruption.
    # Note that, currently, some C++ solvers write 0 out as the "time stopped" by default.
    # This is likely to change in the future.
    if not time_stopped < t or time_stopped == 0:
        return simulation_data

    # Find the index of the time step value which is closest to the time stopped.
    cutoff = numpy.searchsorted(simulation_data[-1]["time"], float(time_stopped))
    if cutoff < 2:
        log.warning('You have paused the simulation too early, and no points have been calculated past'
                    ' initial values. A graphic display will not produce expected results.')

    # Break off any extraneous data which goes past the cutoff time.
    # Any data in this case is assumed to be untrusted.
    for entry_name, entry_data in simulation_data[-1].items():
        simulation_data[-1][entry_name] = entry_data[:cutoff]

    return simulation_data
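# Illustrative sketch (not part of the library source) of the trimming logic above, using plain
# numpy. Entries at or after the pause time are considered untrusted and are cut off.
def _example_resume_trim():
    import numpy

    trajectory = {
        "time": numpy.array([0.0, 1.0, 2.0, 3.0, 4.0]),
        "A": numpy.array([100, 90, 81, 0, 0]),   # zeros past the pause are untrusted
    }
    time_stopped = 2.5
    cutoff = numpy.searchsorted(trajectory["time"], float(time_stopped))  # -> 3
    return {name: data[:cutoff] for name, data in trajectory.items()}     # keep t = 0, 1, 2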
def run(self, model, t=20, number_of_trajectories=1, increment=0.05, seed=None, debug=False, profile=False, live_output=None, live_output_options={}, timeout=None, resume=None, tau_tol=0.03, **kwargs): """ Function calling simulation of the model. This is typically called by the run function in GillesPy2 model objects and will inherit those parameters which are passed with the model as the arguments this run function. :param model: GillesPy2 model object to simulate :type model: gillespy2.Model :param t: Simulation run time :type t: int :param number_of_trajectories: Number of trajectories to simulate :type number_of_trajectories: int :param increment: Save point increment for recording data :type increment: float :param seed: The random seed for the simulation. Optional, defaults to None :type seed: int :param debug: Set to True to provide additional debug information about the simulation :type debug: bool :param profile: Set to True to provide information about step size (tau) taken at each step. :type profile: bool :param live_output: The type of output to be displayed by solver. Can be "progress", "text", or "graph". :type live_output: str :param live_output_options: COntains options for live_output. By default {"interval":1}. "interval" specifies seconds between displaying. "clear_output" specifies if display should be refreshed with each display. :type live_output_options: dict :param timeout: :param resume: :param tau_tol: :param kwargs: :return: """ if isinstance(self, type): self = TauLeapingSolver(debug=debug, profile=profile) self.stop_event = Event() self.pause_event = Event() if timeout is not None and timeout <= 0: timeout = None if len(kwargs) > 0: for key in kwargs: log.warning( 'Unsupported keyword argument to {0} solver: {1}'.format( self.name, key)) # create numpy array for timeline if resume is not None: # start where we last left off if resuming a simulatio lastT = resume['time'][-1] step = lastT - resume['time'][-2] timeline = np.arange(lastT, t + step, step) else: timeline = np.linspace(0, t, int(round(t / increment + 1))) species = list(model._listOfSpecies.keys()) trajectory_base, tempSpecies = nputils.numpy_trajectory_base_initialization( model, number_of_trajectories, timeline, species, resume=resume) # curr_time and curr_state are list of len 1 so that __run receives reference if resume is not None: total_time = [resume['time'][-1]] else: total_time = [0] curr_state = [None] live_grapher = [None] sim_thread = Thread(target=self.___run, args=( model, curr_state, total_time, timeline, trajectory_base, live_grapher, ), kwargs={ 't': t, 'number_of_trajectories': number_of_trajectories, 'increment': increment, 'seed': seed, 'debug': debug, 'resume': resume, 'timeout': timeout, 'tau_tol': tau_tol }) try: sim_thread.start() if resume is not None: resumeTest = True # If resuming, relay this information to live_grapher else: resumeTest = False if live_output is not None: import gillespy2.core.liveGraphing live_output_options['type'] = live_output gillespy2.core.liveGraphing.valid_graph_params( live_output_options) live_grapher[0] = liveGraphing.LiveDisplayer( model, timeline, number_of_trajectories, live_output_options, resume=resumeTest) display_timer = liveGraphing.RepeatTimer( live_output_options['interval'], live_grapher[0].display, args=( curr_state, total_time, trajectory_base, )) display_timer.start() sim_thread.join(timeout=timeout) if live_grapher[0] is not None: display_timer.cancel() self.stop_event.set() while self.result is None: pass except 
KeyboardInterrupt: if live_output: display_timer.cancel() self.pause_event.set() while self.result is None: pass if hasattr(self, 'has_raised_exception'): raise self.has_raised_exception return self.result, self.rc
def run(self=None, model=None, t=20, number_of_trajectories=1, timeout=0, increment=0.05, seed=None, debug=False, profile=False, resume=None, **kwargs): pause = False if resume is not None: if t < resume['time'][-1]: raise gillespyError.ExecutionError( "'t' must be greater than previous simulations end time, or set in the run() method as the " "simulations next end time") if resume is not None: self = SSACSolver(model, resume=resume) else: if self is None or self.model is None: self = SSACSolver(model) if len(kwargs) > 0: for key in kwargs: log.warning( 'Unsupported keyword argument to {0} solver: {1}'.format( self.name, key)) unsupported_sbml_features = { 'Rate Rules': len(model.listOfRateRules), 'Assignment Rules': len(model.listOfAssignmentRules), 'Events': len(model.listOfEvents), 'Function Definitions': len(model.listOfFunctionDefinitions) } detected_features = [] for feature, count in unsupported_sbml_features.items(): if count: detected_features.append(feature) if len(detected_features): raise gillespyError.ModelError( 'Could not run Model. SBML Feature: {} not supported by SSACSolver.' .format(detected_features)) if self.__compiled: self.simulation_data = None if resume is not None: t = abs(t - resume['time'][-1]) number_timesteps = int(round(t / increment + 1)) # Execute simulation. args = [ os.path.join(self.output_directory, 'UserSimulation'), '-trajectories', str(number_of_trajectories), '-timesteps', str(number_timesteps), '-end', str(t) ] if seed is not None: if isinstance(seed, int): args.append('-seed') args.append(str(seed)) else: seed_int = int(seed) if seed_int > 0: args.append('-seed') args.append(str(seed_int)) else: raise gillespyError.ModelError( "seed must be a positive integer") # begin subprocess c simulation with timeout (default timeout=0 will not timeout) with subprocess.Popen(args, stdout=subprocess.PIPE, start_new_session=True) as simulation: return_code = 0 try: if timeout > 0: stdout, stderr = simulation.communicate( timeout=timeout) else: stdout, stderr = simulation.communicate() return_code = simulation.wait() except KeyboardInterrupt: os.killpg( simulation.pid, signal.SIGINT) # send signal to the process group stdout, stderr = simulation.communicate() pause = True return_code = 33 except subprocess.TimeoutExpired: os.killpg( simulation.pid, signal.SIGINT) # send signal to the process group stdout, stderr = simulation.communicate() pause = True return_code = 33 # Parse/return results. if return_code in [0, 33]: trajectory_base, timeStopped = cutils._parse_binary_output( stdout, number_of_trajectories, number_timesteps, len(model.listOfSpecies), pause=pause) if model.tspan[2] - model.tspan[1] == 1: timeStopped = int(timeStopped) # Format results self.simulation_data = [] for trajectory in range(number_of_trajectories): data = {'time': trajectory_base[trajectory, :, 0]} for i in range(len(self.species)): data[self.species[i]] = trajectory_base[trajectory, :, i + 1] self.simulation_data.append(data) else: raise gillespyError.ExecutionError( "Error encountered while running simulation C++ file:" "\nReturn code: {0}.\nError:\n{1}\n".format( simulation.returncode, simulation.stderr)) if resume is not None or timeStopped != 0: self.simulation_data = cutils.c_solver_resume( timeStopped, self.simulation_data, t, resume=resume) return self.simulation_data, return_code
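# Illustrative sketch (not part of the library source): pausing and resuming a C-solver run.
# Assumes `model` is a user-defined gillespy2.Model. A run that is interrupted (or times out,
# return code 33) can be continued by passing its results back in via `resume=`; the new end
# time `t` must be greater than the end time of the previous run.
#
#     from gillespy2 import SSACSolver
#     solver = SSACSolver(model)
#     first_half = model.run(solver=solver, t=100)
#     full_run = model.run(solver=solver, t=200, resume=first_half)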
def run(cls, model, t=20, number_of_trajectories=1, increment=0.05, seed=None, stochkit_home=None, algorithm=None, job_id=None, extra_args='', debug=False, profile=False, show_labels=False, **kwargs): """ Call out and run the solver. Collect the results. """ if len(kwargs) > 0: for key in kwargs: log.warning('Unsupported keyword argument to solver: {0}'.format(key)) if algorithm is None: raise SimulationError("No algorithm selected") # We write all StochKit input and output files to a temporary folder prefix_base_dir = tempfile.mkdtemp() prefix_out_dir = os.path.join(prefix_base_dir, 'output') os.mkdir(prefix_out_dir) if job_id is None: job_id = str(uuid.uuid4()) if isinstance(model, Model): # Write a temporary StochKit2 input file. outfile = os.path.join(prefix_base_dir, "temp_input_{0}.xml".format(job_id)) with open(outfile, 'w') as model_file_handle: model_file_handle.write(model.serialize()) elif isinstance(model, str): outfile = model else: raise InvalidModelError('Model must be either a GillesPy Model instance or an xml file name.') executable = cls.locate_executable(stochkit_home=stochkit_home, algorithm=algorithm) if executable is None: raise SimulationError("stochkit executable '{0}' not found. \ Make sure it is your path, or set STOCHKIT_HOME environment \ variable'".format(algorithm)) # Assemble argument list for StochKit out_dir = os.path.join(prefix_out_dir, job_id) if increment is None: increment = t / 20.0 num_output_points = round(t / increment) # Assemble the argument list args = '--model {0} --out-dir {1} -t {2} -i {3}'.format(outfile, out_dir, t, int(num_output_points)) directories = os.listdir(prefix_out_dir) if os.path.isdir(out_dir): if debug: print('Ensemble {0} already existed, using --force.'.format(job_id)) args += ' --force' # If we are using local mode, shell out and run StochKit # (SSA or Tau-leaping or ODE) cmd = ' '.join([executable, args, extra_args]) if debug: print("cmd: {0}".format(cmd)) # Execute try: handle = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) return_code = handle.wait() except OSError as e: raise SimulationError("Solver execution failed: {0}\n{1}".format(cmd, e)) try: stderr = handle.stderr.read() except Exception as e: stderr = 'Error reading stderr: {0}'.format(e) try: stdout = handle.stdout.read() except Exception as e: stdout = 'Error reading stdout: {0}'.format(e) if return_code != 0: raise SimulationError("Solver execution failed: '{0}' output: {1}{2}".format(cmd, stdout, stderr)) try: # Get data using solver specific function trajectories = cls.get_trajectories(out_dir, debug=debug, show_labels=show_labels) if len(trajectories) == 0: raise SimulationError("Solver execution failed: '{0}' output: {1}{2}".format(cmd, stdout, stderr)) if show_labels: labels, trajectories = trajectories trajectories = cls.label_trajectories(trajectories, labels) return trajectories except Exception as e: compile_log_file = os.path.join(prefix_base_dir, 'temp_input_{0}_generated_code'.format(job_id), 'compile-log.txt') log_file = os.path.join(prefix_out_dir, job_id, 'log.txt') for file_name in [compile_log_file, log_file]: if os.path.isfile(file_name): with open(file_name) as f: error = f.read() raise SimulationError("Error running simulation: {0}\n{1}\n".format(file_name, error)) raise SimulationError("Error using solver.get_trajectories('{0}'): {1}".format(out_dir, e)) finally: # Clean up if debug: print("prefix_base_dir={0}".format(prefix_base_dir)) print("STDOUT: {0}".format(stdout)) print("STDERR: 
{0}".format(stderr)) else: shutil.rmtree(prefix_base_dir)
def run(self, model, t=20, number_of_trajectories=1, increment=0.05, show_labels=True, integrator='lsoda', integrator_options={}, **kwargs): """ :param model: gillespy2.model class object :param t: end time of simulation :param number_of_trajectories: Should be 1. This is deterministic and will always have same results :param increment: time step increment for plotting :param show_labels: If true, simulation returns a list of trajectories, where each list entry is a dictionary containing key value pairs of species : trajectory. If false, returns a numpy array with shape [traj_no, time, species] :param integrator: integrator to be used form scipy.integrate.ode. Options include 'vode', 'zvode', 'lsoda', 'dopri5', and 'dop835'. For more details, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html :param integrator_options: a dictionary containing options to the scipy integrator. for a list of options, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html. Example use: {max_step : 0, rtol : .01} :param kwargs: :return: """ if not isinstance(self, BasicODESolver): self = BasicODESolver() if len(kwargs) > 0: for key in kwargs: log.warning( 'Unsupported keyword argument to {0} solver: {1}'.format( self.name, key)) if number_of_trajectories > 1: log.warning( "Generating duplicate trajectories for model with ODE Solver. Consider running with only 1 trajectory." ) start_state = [ model.listOfSpecies[species].initial_value for species in model.listOfSpecies ] # create mapping of species dictionary to array indices species_mappings = model.sanitized_species_names() species = list(species_mappings.keys()) parameter_mappings = model.sanitized_parameter_names() number_species = len(species) # create numpy array for timeline timeline = np.linspace(0, t, round(t / increment + 1)) # create numpy matrix to mark all state data of time and species trajectory_base = np.empty( (number_of_trajectories, timeline.size, number_species + 1)) # copy time values to all trajectory row starts trajectory_base[:, :, 0] = timeline # copy initial populations to base for i, s in enumerate(species): trajectory_base[:, 0, i + 1] = model.listOfSpecies[s].initial_value # compile reaction propensity functions for eval c_prop = OrderedDict() for r_name, reaction in model.listOfReactions.items(): c_prop[r_name] = compile(reaction.ode_propensity_function, '<string>', 'eval') result = trajectory_base[0] curr_time = 0 entry_count = 0 y0 = [0] * len(model.listOfSpecies) curr_state = OrderedDict() for i, s in enumerate(model.listOfSpecies.values()): curr_state[s.name] = s.initial_value y0[i] = s.initial_value for p_name, param in model.listOfParameters.items(): curr_state[p_name] = param.value rhs = ode(BasicODESolver.__f).set_integrator(integrator, **integrator_options) rhs.set_initial_value(y0, curr_time).set_f_params(curr_state, model, c_prop) while entry_count < timeline.size - 1: int_time = curr_time + increment entry_count += 1 y0 = rhs.integrate(int_time) curr_time += increment for i, spec in enumerate(model.listOfSpecies): curr_state[spec] = y0[i] result[entry_count][i + 1] = curr_state[spec] if show_labels: results_as_dict = {'time': timeline} for i, species in enumerate(model.listOfSpecies): results_as_dict[species] = result[:, i + 1] results = [results_as_dict] * number_of_trajectories else: results = np.stack([result] * number_of_trajectories, axis=0) return results
def run(self, model, t=20, number_of_trajectories=1, increment=0.05, seed=None, debug=False, profile=False, show_labels=True, tau_tol=0.03, event_sensitivity=100, integrator='LSODA', integrator_options={}, timeout=None, **kwargs): """ Function calling simulation of the model. This is typically called by the run function in GillesPy2 model objects and will inherit those parameters which are passed with the model as the arguments this run function. Attributes ---------- model : GillesPy2.Model GillesPy2 model object to simulate t : int Simulation run time number_of_trajectories : int The number of times to sample the chemical master equation. Each trajectory will be returned at the end of the simulation. Optional, defaults to 1. increment : float Save point increment for recording data seed : int The random seed for the simulation. Optional, defaults to None. debug : bool (False) Set to True to provide additional debug information about the simulation. profile : bool (Fasle) Set to True to provide information about step size (tau) taken at each step. show_labels: bool (True) If true, simulation returns a list of trajectories, where each list entry is a dictionary containing key value pairs of species : trajectory. If false, returns a numpy array with shape [traj_no, time, species] tau_tol: float Tolerance level for Tau leaping algorithm. Larger tolerance values will result in larger tau steps. Default value is 0.03. event_sensitivity: int Number of data points to be inspected between integration steps/save points for event detection integrator: String integrator method to be used form scipy.integrate.solve_ivp. Options include 'RK45', 'RK23', 'Radau', 'BDF', and 'LSODA'. For more details, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html integrator_options: dictionary contains options to the scipy integrator. by default, this includes rtol=1e-9 and atol=1e-12. for a list of options, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html. Example use: {max_step : 0, rtol : .01} """ if isinstance(self, type): self = BasicTauHybridSolver() self.stop_event = threading.Event() if len(kwargs) > 0: for key in kwargs: log.warning( 'Unsupported keyword argument to {0} solver: {1}'.format( self.name, key)) if timeout is not None and timeout <= 0: timeout = None sim_thread = threading.Thread(target=self.___run, args=(model, ), kwargs={ 't': t, 'number_of_trajectories': number_of_trajectories, 'increment': increment, 'seed': seed, 'debug': debug, 'profile': profile, 'show_labels': show_labels, 'timeout': timeout, 'tau_tol': tau_tol, 'event_sensitivity': event_sensitivity, 'integrator': integrator, 'integrator_options': integrator_options }) try: sim_thread.start() sim_thread.join(timeout=timeout) self.stop_event.set() while self.result is None: pass except: pass if hasattr(self, 'has_raised_exception'): raise self.has_raised_exception return self.result, self.rc
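# Illustrative sketch (not part of the library source): running the hybrid tau solver. Species
# may be simulated as 'continuous', 'dynamic', or 'discrete' depending on their mode; `model`
# is assumed to be a user-defined gillespy2.Model containing such species.
#
#     from gillespy2 import BasicTauHybridSolver
#     results = model.run(solver=BasicTauHybridSolver, t=50, tau_tol=0.03, integrator='LSODA')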
def run(self, model, t=20, number_of_trajectories=1, increment=0.05, seed=None, debug=False, show_labels=True, live_output=None, live_output_options={}, timeout=None, resume=None, **kwargs): """ Run the SSA algorithm using a NumPy for storing the data in arrays and generating the timeline. :param model: The model on which the solver will operate. :param t: The end time of the solver. :param number_of_trajectories: The number of times to sample the chemical master equation. Each trajectory will be returned at the end of the simulation. :param increment: The time step of the solution. :param seed: The random seed for the simulation. Defaults to None. :param debug: Set to True to provide additional debug information about the simulation. :param resume: Result of a previously run simulation, to be resumed :param live_output : str The type of output to be displayed by solver. Can be "progress", "text", or "graph". :param live_output_options : dictionary contains options for live_output. By default {"interval":1}. "interval" specifies seconds between displaying. "clear_output" specifies if display should be refreshed with each display :return: a list of each trajectory simulated. """ if isinstance(self, type): self = NumPySSASolver() self.stop_event = Event() self.pause_event = Event() if timeout is not None and timeout <= 0: timeout = None if len(kwargs) > 0: for key in kwargs: log.warning( 'Unsupported keyword argument to {0} solver: {1}'.format( self.name, key)) # create numpy array for timeline if resume is not None: # start where we last left off if resuming a simulation lastT = resume['time'][-1] step = lastT - resume['time'][-2] timeline = np.arange(lastT, t + step, step) else: timeline = np.linspace(0, t, int(round(t / increment + 1))) species = list(model._listOfSpecies.keys()) trajectory_base, tmpSpecies = nputils.numpy_trajectory_base_initialization( model, number_of_trajectories, timeline, species, resume=resume) # curr_time and curr_state are list of len 1 so that __run receives reference if resume is not None: total_time = [resume['time'][-1]] else: total_time = [0] curr_state = [None] live_grapher = [None] sim_thread = Thread(target=self.___run, args=( model, curr_state, total_time, timeline, trajectory_base, live_grapher, ), kwargs={ 't': t, 'number_of_trajectories': number_of_trajectories, 'increment': increment, 'seed': seed, 'debug': debug, 'show_labels': show_labels, 'timeout': timeout, 'resume': resume, }) try: sim_thread.start() if live_output is not None: import gillespy2.core.liveGraphing live_output_options['type'] = live_output gillespy2.core.liveGraphing.valid_graph_params( live_output_options) if resume is not None: resumeTest = True # If resuming, relay this information to live_grapher else: resumeTest = False live_grapher[0] = gillespy2.core.liveGraphing.LiveDisplayer( model, timeline, number_of_trajectories, live_output_options, resume=resumeTest) display_timer = gillespy2.core.liveGraphing.RepeatTimer( live_output_options['interval'], live_grapher[0].display, args=( curr_state, total_time, trajectory_base, )) display_timer.start() sim_thread.join(timeout=timeout) if live_grapher[0] is not None: display_timer.cancel() self.stop_event.set() while self.result is None: pass except KeyboardInterrupt: if live_output: display_timer.cancel() self.pause_event.set() while self.result is None: pass if hasattr(self, 'has_raised_exception'): raise self.has_raised_exception return self.result, self.rc
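# Illustrative sketch (not part of the library source): requesting live output while the SSA
# runs. Assumes `model` is a user-defined gillespy2.Model. The options dict is validated and
# filled in by valid_graph_params (see above).
#
#     from gillespy2 import NumPySSASolver
#     results = model.run(solver=NumPySSASolver, t=100, number_of_trajectories=5,
#                         live_output="progress", live_output_options={"interval": 2})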
def run(self, model, t=20, number_of_trajectories=1, increment=0.05, seed=None, debug=False, profile=False, show_labels=True, switch_tol=0.03, tau_tol=0.03, integrator='lsoda', integrator_options={}, **kwargs): """ Function calling simulation of the model. This is typically called by the run function in GillesPy2 model objects and will inherit those parameters which are passed with the model as the arguments this run function. Attributes ---------- model : GillesPy2.Model GillesPy2 model object to simulate t : int Simulation run time number_of_trajectories : int The number of times to sample the chemical master equation. Each trajectory will be returned at the end of the simulation. Optional, defaults to 1. increment : float Save point increment for recording data seed : int The random seed for the simulation. Optional, defaults to None. debug : bool (False) Set to True to provide additional debug information about the simulation. profile : bool (Fasle) Set to True to provide information about step size (tau) taken at each step. show_labels: bool (True) If true, simulation returns a list of trajectories, where each list entry is a dictionary containing key value pairs of species : trajectory. If false, returns a numpy array with shape [traj_no, time, species] switch_tol: float Relative error tolerance value for deterministic/stochastic switching condition between 0.0 and 1.0 tau_tol: float Relative error tolerance value for calculating tau step between 0.0 and 1.0 integrator: String integrator to be used form scipy.integrate.ode. Options include 'vode', 'zvode', 'lsoda', 'dopri5', and 'dop835'. For more details, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html integrator_options: dictionary contains options to the scipy integrator. for a list of options, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html. Example use: {max_step : 0, rtol : .01} """ if not isinstance(self, BasicTauHybridSolver): self = BasicTauHybridSolver() if len(kwargs) > 0: for key in kwargs: log.warning( 'Unsupported keyword argument to {0} solver: {1}'.format( self.name, key)) if debug: print("t = ", t) print("increment = ", increment) # create mapping of species dictionary to array indices species_mappings = model.sanitized_species_names() species = list(species_mappings.keys()) parameter_mappings = model.sanitized_parameter_names() number_species = len(species) # create numpy array for timeline timeline = np.linspace(0, t, round(t / increment + 1)) # create numpy matrix to mark all state data of time and species trajectory_base = np.empty( (number_of_trajectories, timeline.size, number_species + 1)) # copy time values to all trajectory row starts trajectory_base[:, :, 0] = timeline spec_modes = ['continuous', 'dynamic', 'discrete'] # copy initial populations to base for i, s in enumerate(species): if model.listOfSpecies[s].mode not in spec_modes: raise SpeciesError( 'Species mode can only be \'continuous\', \'dynamic\', or \'discrete\'.' 
) trajectory_base[:, 0, i + 1] = model.listOfSpecies[s].initial_value det_spec = { species: True for (species, value) in model.listOfSpecies.items() if value.mode == 'dynamic' } det_rxn = { rxn: False for (rxn, value) in model.listOfReactions.items() } dependencies = OrderedDict() for reaction in model.listOfReactions: dependencies[reaction] = set() [ dependencies[reaction].add(reactant.name) for reactant in model.listOfReactions[reaction].reactants ] [ dependencies[reaction].add(product.name) for product in model.listOfReactions[reaction].products ] pure_ode = True for reaction in model.listOfReactions.keys(): for dep in dependencies[reaction]: if model.listOfSpecies[dep].mode != 'continuous': pure_ode = False break if debug: print('dependencies') print(dependencies) simulation_data = [] # Set seed if supplied if seed is not None: if not isinstance(seed, int): seed = int(seed) if seed > 0: random.seed(seed) else: raise ModelError('seed must be a positive integer') for trajectory_num in range(number_of_trajectories): steps_taken = [] # For use with profile=True steps_rejected = 0 # For use with profile=True, incremented when negative state detected entry_count = 0 # NumPy array index for results at each timestep trajectory = trajectory_base[ trajectory_num] # NumPy array containing this simulation's results y0 = [0] * (len(model.listOfReactions) + len(model.listOfRateRules) ) # Integration initial state rxn_offset = OrderedDict( ) # Offsets for root-finding method of detecting reactions fired propensities = OrderedDict( ) # Propensities evaluated at current state curr_state = OrderedDict() # Current state of the system curr_time = 0 # Current Simulation Time curr_state['vol'] = model.volume # Model volume save_time = 0 # Time of next entry to results data = OrderedDict() # Dictionary for show_labels results data['time'] = timeline # All time entries for show_labels results # Record Highest Order reactant for each reaction and set error tolerance HOR, reactants, mu_i, sigma_i, g_i, epsilon_i, critical_threshold = Tau.initialize( model, tau_tol) # initialize species population state for s in model.listOfSpecies: curr_state[s] = model.listOfSpecies[s].initial_value # intialize parameters to current state for p in model.listOfParameters: curr_state[p] = model.listOfParameters[p].value # Set reactions to uniform random number for i, r in enumerate(model.listOfReactions): rxn_offset[r] = math.log(random.uniform(0, 1)) if debug: print("Setting Random number ", rxn_offset[r], " for ", model.listOfReactions[r].name) # One-time compilations to reduce time spent with eval compiled_reactions = OrderedDict() for i, r in enumerate(model.listOfReactions): compiled_reactions[r] = compile( model.listOfReactions[r].propensity_function, '<string>', 'eval') compiled_rate_rules = OrderedDict() for i, rr in enumerate(model.listOfRateRules): compiled_rate_rules[rr] = compile( model.listOfRateRules[rr].expression, '<string>', 'eval') compiled_inactive_reactions = OrderedDict() compiled_propensities = OrderedDict() for i, r in enumerate(model.listOfReactions): compiled_propensities[r] = compile( model.listOfReactions[r].propensity_function, '<string>', 'eval') all_compiled = OrderedDict() all_compiled['rxns'] = compiled_reactions all_compiled['rules'] = compiled_rate_rules all_compiled['inactive_rxns'] = compiled_inactive_reactions timestep = 0 # Each save step while entry_count < timeline.size: # Until save step reached while curr_time < save_time: # Get current propensities for i, r in 
enumerate(model.listOfReactions): propensities[r] = eval(compiled_propensities[r], curr_state) # Calculate Tau statistics and select a good tau step tau_args = [ HOR, reactants, mu_i, sigma_i, g_i, epsilon_i, tau_tol, critical_threshold, model, propensities, curr_state, curr_time, save_time ] tau_step = save_time - curr_time if pure_ode else Tau.select( *tau_args) if profile: steps_taken.append(tau_step) # Calculate sd and CV for hybrid switching and flag deterministic reactions switch_args = [ mu_i, sigma_i, model, propensities, curr_state, tau_step, det_spec, dependencies, switch_tol ] sd, CV = self.calculate_statistics(*switch_args) deterministic_reactions = self.flag_det_reactions( model, det_spec, det_rxn, dependencies) if debug: print( 'Calculating mean, standard deviation at time {0}'. format((curr_time + tau_step))) print('mean: {0}'.format(mu_i)) print('standard deviation: {0}'.format(sd)) print('CV: {0}'.format(CV)) print('det_spec: {0}'.format(det_spec)) print('det_rxn: {0}'.format(det_rxn)) # Set active reactions and rate rules for this integration step self.toggle_reactions(model, all_compiled, deterministic_reactions, dependencies, curr_state, rxn_offset, det_spec) active_rr = compiled_rate_rules[deterministic_reactions] # Build integration start state y0 = [0] * (len(compiled_reactions) + len(active_rr)) for i, spec in enumerate(active_rr): y0[i] = curr_state[spec] for i, rxn in enumerate(compiled_reactions): y0[i + len(active_rr)] = rxn_offset[rxn] # Back up current state prev_y0 = y0.copy() prev_curr_state = curr_state.copy() prev_curr_time = curr_time # Integrate to selected Tau, if a species goes negative, integration failed, try again. loop_cnt = 0 while True: loop_cnt += 1 if loop_cnt > 100: raise Exception( "Loop over __get_reactions() exceeded loop count" ) # Perform integration over this tau step reactions, y0, curr_state, curr_time = self.__get_reactions( integrator, integrator_options, tau_step, curr_state, y0, model, curr_time, save_time, propensities, compiled_reactions, active_rr, rxn_offset, debug) # Update curr_state with the result of the SSA reaction that fired species_modified = OrderedDict() for i, r in enumerate(compiled_reactions): if reactions[r] > 0: for reactant in model.listOfReactions[ r].reactants: species_modified[str(reactant)] = True curr_state[str( reactant )] -= model.listOfReactions[r].reactants[ reactant] * reactions[r] for product in model.listOfReactions[ r].products: species_modified[str(product)] = True curr_state[str( product)] += model.listOfReactions[ r].products[product] * reactions[r] # If a species is is negative, rewind and record neg_state = False for s in species_modified.keys(): if curr_state[s] < 0: neg_state = True if debug: print( "Negative state detected: curr_state[{0}]= {1}" .format(s, curr_state[s])) if neg_state: steps_rejected += 1 if debug: print("\trxn={0}".format(reactions)) y0 = prev_y0.copy() curr_state = prev_curr_state.copy() curr_time = prev_curr_time tau_step = tau_step / 2 if debug: print("Resetting curr_state[{0}]= {1}".format( s, curr_state[s])) if debug: print( "\tRejecting step, taking step of half size, tau_step={0}" .format(tau_step)) else: break # breakout of the while True if profile: steps_taken.append(tau_step) # Save step reached for i in range(number_species): trajectory[entry_count][i + 1] = curr_state[species[i]] save_time += increment timestep += 1 entry_count += 1 # End of trajectory if show_labels: for i in range(number_species): data[species[i]] = trajectory[:, i + 1] 
simulation_data.append(data) else: simulation_data = trajectory_base if profile: print(steps_taken) print("Total Steps Taken: ", len(steps_taken)) print("Total Steps Rejected: ", steps_rejected) return simulation_data