Example #1
    def _load_trajectories(self, directory):
        """Read simulation results from output files.

        Returns `tout` and `trajectories` arrays.
        """
        files = [
            filename for filename in os.listdir(directory)
            if re.match(self._prefix, filename)
        ]
        if len(files) == 0:
            raise SimulatorException(
                "Cannot find any output files to load data from.")
        if len(files) != len(self.param_values):
            raise SimulatorException(
                "Number of input files (%d) does not match number "
                "of requested simulations (%d)." %
                (len(files), len(self.param_values)))
        n_sims = len(files)
        trajectories = [None] * n_sims
        tout = [None] * n_sims
        traj_n = np.ones((len(self.tspan), self._len_species)) * float('nan')
        tout_n = np.ones(len(self.tspan)) * float('nan')
        # load the data
        indir_prefix = os.path.join(directory, self._prefix)
        for n in range(n_sims):
            trajectories[n] = traj_n.copy()
            tout[n] = tout_n.copy()
            filename = indir_prefix + "_" + str(n)
            if not os.path.isfile(filename):
                raise Exception("Cannot find input file " + filename)
            if n == 0:
                # determine the optimal loading method from the first file
                (data, use_pandas) = self._test_pandas(filename)
            else:
                # load data with the method chosen above
                if use_pandas:
                    data = self._load_with_pandas(filename)
                else:
                    data = self._load_with_openfile(filename)
            # store data
            tout[n] = data[:, 0]
            trajectories[n][:, self._out_species] = data[:, 1:]
            # volume correction
            if self.vol:
                trajectories[n][:, self._out_species] *= (N_A * self.vol)
        return np.array(tout), np.array(trajectories)
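
The loader above expects one plain-text file per simulation, named "<prefix>_<n>", with time in the first column and the requested species in the remaining columns. A minimal sketch of reading a single such file with NumPy (the path is an assumption; the pandas fast path used by _load_with_pandas is omitted):

import numpy as np

# Hypothetical single output file laid out as [time, species_0, species_1, ...]
data = np.loadtxt("OUTPUT/my_model_0")
tout_0 = data[:, 0]   # time points for simulation 0
traj_0 = data[:, 1:]  # species trajectories for simulation 0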
Example #2
    def __init__(self,
                 model,
                 tspan=None,
                 initials=None,
                 param_values=None,
                 verbose=False,
                 **kwargs):
        super(CupSodaSimulator, self).__init__(model,
                                               tspan=tspan,
                                               initials=initials,
                                               param_values=param_values,
                                               verbose=verbose,
                                               **kwargs)
        self.gpu = kwargs.get('gpu', 0)
        self._obs_species_only = kwargs.get('obs_species_only', True)
        self._cleanup = kwargs.get('cleanup', True)
        self._prefix = kwargs.get('prefix', self._model.name.replace('.', '_'))
        self._base_dir = kwargs.get('base_dir', None)
        self.integrator = kwargs.get('integrator', 'cupsoda')

        # generate the equations for the model
        pysb.bng.generate_equations(self._model, self._cleanup, self.verbose)

        # build integrator options list from our defaults and any kwargs
        # passed to this function
        options = {}
        if self.default_integrator_options.get(self.integrator):
            options.update(self.default_integrator_options[
                self.integrator])  # default options
        else:
            raise SimulatorException("Integrator type '" + self.integrator +
                                     "' not recognized.")
        options.update(kwargs.get('integrator_options', {}))  # overwrite

        # defaults
        self.opts = options
        self._out_species = None

        # private variables (to reduce the number of function calls)
        self._len_rxns = len(self._model.reactions)
        self._len_species = len(self._model.species)
        self._len_params = len(self._model.parameters)
        self._model_parameters_rules = self._model.parameters_rules()

        # Set cupsoda verbosity level
        logger_level = self._logger.logger.getEffectiveLevel()
        if logger_level <= EXTENDED_DEBUG:
            self._cupsoda_verbose = 2
        elif logger_level <= logging.DEBUG:
            self._cupsoda_verbose = 1
        else:
            self._cupsoda_verbose = 0

        # regex for extracting cupSODA reported running time
        self._running_time_regex = re.compile(r'Running time:\s+(\d+\.\d+)')
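
A minimal usage sketch for this constructor, assuming the cupSODA binary and a CUDA-capable GPU are available; the integrator option names shown ('atol', 'rtol') are assumed to match the simulator's defaults:

import numpy as np
from pysb.examples import robertson
from pysb.simulator.cupsoda import CupSodaSimulator

# Sketch only: build the simulator on GPU 0 with tightened tolerances.
sim = CupSodaSimulator(robertson.model,
                       tspan=np.linspace(0, 100, 101),
                       gpu=0,
                       integrator_options={'atol': 1e-8, 'rtol': 1e-6})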
Example #3
    def run(self,
            tspan=None,
            initials=None,
            param_values=None,
            n_runs=1,
            method='ssa',
            output_dir=None,
            output_file_basename=None,
            cleanup=None,
            population_maps=None,
            **additional_args):
        """
        Simulate a model using BioNetGen

        Parameters
        ----------
        tspan: vector-like
            time span of simulation
        initials: vector-like, optional
            initial conditions of model
        param_values : vector-like or dictionary, optional
            Values to use for every parameter in the model. Ordering is
            determined by the order of model.parameters.
            If not specified, parameter values will be taken directly from
            model.parameters.
        n_runs: int
            number of simulations to run
        method : str
            Type of simulation to run. Must be one of:

                * 'ssa' - Stochastic Simulation Algorithm (direct method with
                  propensity sorting)

                * 'nf' - Stochastic network free simulation with NFsim.
                  Performs Hybrid Particle/Population simulation if
                  population_maps argument is supplied

                * 'pla' - Partitioned-leaping algorithm (variant of tau-leaping
                  algorithm)

                * 'ode' - ODE simulation (Sundials CVODE algorithm)

        output_dir : string, optional
            Location for temporary files generated by BNG. If None (the
            default), uses a temporary directory provided by the system. A
            temporary directory with a random name is created within the
            supplied location.
        output_file_basename : string, optional
            This argument is used as a prefix for the temporary BNG
            output directory, rather than the individual files.
        cleanup : bool, optional
            If True (default), delete the temporary files after the
            simulation is finished. If False, leave them in place (Useful for
            debugging). The default value, None, means to use the value
            specified in :py:func:`__init__`.
        population_maps: list of PopulationMap
            List of :py:class:`PopulationMap` objects for hybrid
            particle/population modeling. Only used when method='nf'.
        additional_args: kwargs, optional
            Additional arguments to pass to BioNetGen

        Examples
        --------

        Simulate a model using network free simulation (NFsim):

        >>> import numpy as np
        >>> from pysb.examples import robertson
        >>> from pysb.simulator.bng import BngSimulator
        >>> model = robertson.model
        >>> sim = BngSimulator(model, tspan=np.linspace(0, 1))
        >>> x = sim.run(n_runs=1, method='nf')


        """
        super(BngSimulator, self).run(tspan=tspan,
                                      initials=initials,
                                      param_values=param_values,
                                      _run_kwargs=locals())

        if cleanup is None:
            cleanup = self.cleanup

        if method not in self._SIMULATOR_TYPES:
            raise ValueError("Method must be one of " +
                             str(self._SIMULATOR_TYPES))

        if method != 'nf' and population_maps:
            raise ValueError('population_maps argument is only used when '
                             'method is "nf"')

        if method == 'nf':
            if population_maps is not None and (
                    not isinstance(population_maps, collections.Iterable)
                    or any(not isinstance(pm, PopulationMap)
                           for pm in population_maps)):
                raise ValueError('population_maps should be a list of '
                                 'PopulationMap objects')
            model_additional_species = self.initials_dict.keys()
        else:
            model_additional_species = None

        tspan_lin_spaced = np.allclose(
            self.tspan,
            np.linspace(self.tspan[0], self.tspan[-1], len(self.tspan)))
        if method == 'nf' and (not tspan_lin_spaced or self.tspan[0] != 0.0):
            raise SimulatorException('NFsim requires tspan to be linearly '
                                     'spaced starting at t=0')

        # BNG requires t_start even when supplying sample_times
        additional_args['t_start'] = self.tspan[0]
        if tspan_lin_spaced:
            # Just supply t_end and n_steps
            additional_args['n_steps'] = len(self.tspan) - 1
            additional_args['t_end'] = self.tspan[-1]
        else:
            additional_args['sample_times'] = self.tspan

        additional_args['method'] = method
        additional_args['print_functions'] = True
        verbose_bool = self._logger.logger.getEffectiveLevel() <= logging.DEBUG
        extended_debug = self._logger.logger.getEffectiveLevel() <= \
                         EXTENDED_DEBUG
        additional_args['verbose'] = extended_debug
        params_names = [g.name for g in self._model.parameters]

        n_param_sets = self.initials_length
        total_sims = n_runs * n_param_sets

        self._logger.info('Running %d BNG %s simulations' %
                          (total_sims, method))

        model_to_load = None
        hpp_bngl = None

        if population_maps:
            self._logger.debug('Generating hybrid particle-population model')
            hpp_bngl = generate_hybrid_model(self._model,
                                             population_maps,
                                             model_additional_species,
                                             verbose=extended_debug)
        else:
            model_to_load = self._model

        with BngFileInterface(
                model_to_load,
                verbose=verbose_bool,
                output_dir=output_dir,
                output_prefix=output_file_basename,
                cleanup=cleanup,
                model_additional_species=model_additional_species) as bngfile:
            if hpp_bngl:
                hpp_bngl_filename = os.path.join(bngfile.base_directory,
                                                 'hpp_model.bngl')
                self._logger.debug('HPP BNGL:\n\n' + hpp_bngl)
                with open(hpp_bngl_filename, 'w') as f:
                    f.write(hpp_bngl)
            if method != 'nf':
                # TODO: Write existing netfile if already generated
                bngfile.action('generate_network',
                               overwrite=True,
                               verbose=extended_debug)
            if output_file_basename is None:
                prefix = 'pysb'
            else:
                prefix = output_file_basename

            sim_prefix = 0
            for pset_idx in range(n_param_sets):
                for n in range(len(self.param_values[pset_idx])):
                    bngfile.set_parameter(params_names[n],
                                          self.param_values[pset_idx][n])
                for cp, values in self.initials_dict.items():
                    if population_maps:
                        for pm in population_maps:
                            if pm.complex_pattern.is_equivalent_to(cp):
                                cp = pm.counter_species
                                break
                    bngfile.set_concentration(cp, values[pset_idx])
                for sim_rpt in range(n_runs):
                    tmp = additional_args.copy()
                    tmp['prefix'] = '{}{}'.format(prefix, sim_prefix)
                    bngfile.action('simulate', **tmp)
                    bngfile.action('resetConcentrations')
                    sim_prefix += 1
            if hpp_bngl:
                bngfile.execute(reload_netfile=hpp_bngl_filename,
                                skip_file_actions=True)
            else:
                bngfile.execute()
            if method != 'nf':
                load_equations(self.model, bngfile.net_filename)
            list_of_yfull = BngFileInterface.read_simulation_results_multi(
                [bngfile.base_filename + str(n) for n in range(total_sims)])

        tout = []
        species_out = []
        obs_exp_out = []
        for i in range(total_sims):
            yfull = list_of_yfull[i]
            yfull_view = yfull.view(float).reshape(len(yfull), -1)

            tout.append(yfull_view[:, 0])

            if method == 'nf':
                obs_exp_out.append(yfull_view[:, 1:])
            else:
                species_out.append(yfull_view[:,
                                              1:(len(self.model.species) + 1)])
                if len(self.model.observables) or len(self.model.expressions):
                    obs_exp_out.append(
                        yfull_view[:, (len(self.model.species) +
                                       1):(len(self.model.species) + 1) +
                                   len(self.model.observables) +
                                   len(self.model.expressions_dynamic())])

        return SimulationResult(self,
                                tout=tout,
                                trajectories=species_out,
                                observables_and_expressions=obs_exp_out,
                                simulations_per_param_set=n_runs)
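
A short usage sketch for scanning parameter sets with repeated stochastic runs (BioNetGen must be on the path; the perturbation of the first parameter is arbitrary and purely illustrative):

import numpy as np
from pysb.examples import robertson
from pysb.simulator.bng import BngSimulator

model = robertson.model
sim = BngSimulator(model, tspan=np.linspace(0, 50, 51))
# Two parameter sets x 3 repeats = 6 SSA simulations in total.
param_sets = np.repeat([[p.value for p in model.parameters]], 2, axis=0)
param_sets[1, 0] *= 2  # perturb the first parameter in the second set
res = sim.run(param_values=param_sets, n_runs=3, method='ssa')
print(len(res.observables))  # one record array per simulation (6 here)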
Example #4
    def _run_chunk(self, gpus, outdir, chunk_idx, cmtx, sims, trajectories,
                   tout):
        _indirs = {}
        _outdirs = {}
        p = {}

        # Path to cupSODA executable
        bin_path = get_path('cupsoda')

        # Start simulations
        for gpu in gpus:
            _indirs[gpu] = os.path.join(
                outdir, "INPUT_GPU{}_{}".format(gpu, chunk_idx))
            os.mkdir(_indirs[gpu])
            _outdirs[gpu] = os.path.join(
                outdir, "OUTPUT_GPU{}_{}".format(gpu, chunk_idx))

            # Create cupSODA input files
            self._create_input_files(_indirs[gpu], sims[gpu], cmtx)

            # Build command
            # ./cupSODA input_model_folder blocks output_folder simulation_
            # file_prefix gpu_number fitness_calculation memory_use dump
            command = [
                bin_path, _indirs[gpu],
                str(self.n_blocks), _outdirs[gpu], self._prefix,
                str(gpu), '0', self._memory_usage,
                str(self._cupsoda_verbose)
            ]

            self._logger.info("Running cupSODA: " + ' '.join(command))

            # Run simulation and return trajectories
            p[gpu] = subprocess.Popen(command,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)

        # Read results
        for gpu in gpus:
            (p_out, p_err) = p[gpu].communicate()
            p_out = p_out.decode('utf-8')
            p_err = p_err.decode('utf-8')
            logger_level = self._logger.logger.getEffectiveLevel()
            if logger_level <= logging.INFO:
                run_time_match = self._running_time_regex.search(p_out)
                if run_time_match:
                    self._logger.info('cupSODA GPU {} chunk {} reported '
                                      'time: {} seconds'.format(
                                          gpu, chunk_idx,
                                          run_time_match.group(1)))
            self._logger.debug('cupSODA GPU {} chunk {} stdout:\n{}'.format(
                gpu, chunk_idx, p_out))
            if p_err:
                self._logger.error('cupSODA GPU {} chunk {} '
                                   'stderr:\n{}'.format(gpu, chunk_idx, p_err))
            if p[gpu].returncode:
                raise SimulatorException(
                    "cupSODA GPU {} chunk {} exception:\n{}\n{}".format(
                        gpu, chunk_idx, p_out.rstrip("at line"),
                        p_err.rstrip()))
            tout_run, trajectories_run = self._load_trajectories(
                _outdirs[gpu], sims[gpu])
            if trajectories is None:
                tout = tout_run
                trajectories = trajectories_run
            else:
                tout = np.concatenate((tout, tout_run))
                trajectories = np.concatenate((trajectories, trajectories_run))

        return tout, trajectories
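
The caller is responsible for splitting the requested simulations into the per-GPU groups passed in as `sims`. A minimal sketch of how that split might be built (illustrative only; the variable names are assumptions, not the simulator's internals):

import numpy as np

# Divide 100 simulation indices over two GPUs so each GPU gets its own
# sims[gpu] slice and, in _run_chunk, its own INPUT/OUTPUT directories.
gpus = [0, 1]
all_sim_indices = np.arange(100)
chunks = np.array_split(all_sim_indices, len(gpus))
sims = {gpu: chunk for gpu, chunk in zip(gpus, chunks)}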
Example #5
    def __init__(self,
                 model,
                 tspan=None,
                 initials=None,
                 param_values=None,
                 verbose=False,
                 **kwargs):
        super(CupSodaSimulator, self).__init__(model,
                                               tspan=tspan,
                                               initials=initials,
                                               param_values=param_values,
                                               verbose=verbose,
                                               **kwargs)
        self.gpu = kwargs.pop('gpu', (0, ))
        if not isinstance(self.gpu, collections.Iterable):
            self.gpu = [self.gpu]
        self._obs_species_only = kwargs.pop('obs_species_only', True)
        self._cleanup = kwargs.pop('cleanup', True)
        self._prefix = kwargs.pop('prefix', self._model.name)
        # Sanitize the directory - cupsoda doesn't handle spaces etc. well
        self._prefix = re.sub('[^0-9a-zA-Z]', '_', self._prefix)
        self._base_dir = kwargs.pop('base_dir', None)
        self.integrator = kwargs.pop('integrator', 'cupsoda')
        integrator_options = kwargs.pop('integrator_options', {})

        if kwargs:
            raise ValueError('Unknown keyword argument(s): {}'.format(
                ', '.join(kwargs.keys())))

        unknown_integrator_options = set(integrator_options.keys()).difference(
            self._integrator_options_allowed)
        if unknown_integrator_options:
            raise ValueError(
                'Unknown integrator_options: {}. Allowed options: {}'.format(
                    ', '.join(unknown_integrator_options),
                    ', '.join(self._integrator_options_allowed)))

        # generate the equations for the model
        pysb.bng.generate_equations(self._model, self._cleanup, self.verbose)

        # build integrator options list from our defaults and any kwargs
        # passed to this function
        options = {}
        if self.default_integrator_options.get(self.integrator):
            options.update(self.default_integrator_options[
                self.integrator])  # default options
        else:
            raise SimulatorException("Integrator type '" + self.integrator +
                                     "' not recognized.")
        options.update(integrator_options)  # overwrite

        # defaults
        self.opts = options
        self._out_species = None

        # private variables (to reduce the number of function calls)
        self._len_rxns = len(self._model.reactions)
        self._len_species = len(self._model.species)
        self._len_params = len(self._model.parameters)
        self._model_parameters_rules = self._model.parameters_rules()

        # Set cupsoda verbosity level
        logger_level = self._logger.logger.getEffectiveLevel()
        if logger_level <= EXTENDED_DEBUG:
            self._cupsoda_verbose = 2
        elif logger_level <= logging.DEBUG:
            self._cupsoda_verbose = 1
        else:
            self._cupsoda_verbose = 0

        # regex for extracting cupSODA reported running time
        self._running_time_regex = re.compile(r'Running time:\s+(\d+\.\d+)')
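
Unlike the earlier constructor, this version accepts an iterable of GPU indices and rejects unknown keyword arguments. A brief sketch (the availability of two GPUs and the 'max_steps' option name are assumptions):

import numpy as np
from pysb.examples import robertson
from pysb.simulator.cupsoda import CupSodaSimulator

# Spread the workload over GPUs 0 and 1; a misspelled keyword would now
# raise ValueError instead of being silently ignored.
sim = CupSodaSimulator(robertson.model,
                       tspan=np.linspace(0, 100, 101),
                       gpu=(0, 1),
                       integrator_options={'max_steps': 20000})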
Example #6
    def run(self,
            tspan=None,
            initials=None,
            param_values=None,
            n_runs=1,
            algorithm='ssa',
            output_dir=None,
            num_processors=1,
            seed=None,
            method=None,
            stats=False,
            epsilon=None,
            threshold=None):
        """
        Run a simulation and return the result (trajectories).

        .. note::
            ``tspan``, ``initials`` and ``param_values`` values supplied to
            this method will persist to future :func:`run` calls.

        Parameters
        ----------
        tspan
        initials
        param_values
            See parameter definitions in :class:`StochKitSimulator`.
        n_runs : int
            The number of simulation runs per parameter set. The total
            number of simulations is therefore n_runs * max(len(initials),
            len(param_values))
        algorithm : str
            Choice of 'ssa' (Gillespie's stochastic simulation algorithm) or
            'tau_leaping' (Tau leaping algorithm)
        output_dir : str or None
            Directory for StochKit output, or None for a system-specific
            temporary directory
        num_processors : int
            Number of CPU cores for StochKit to use (default: 1)
        seed : int or None
            A random number seed for StochKit. Set to any integer value for
            deterministic behavior.
        method : str or None
            StochKit "method" argument, default None. Only used by StochKit
            2.1 (not yet released at time of writing).
        stats : bool
            Ask StochKit to generate simulation summary statistics if True
        epsilon : float or None
            Tolerance parameter for tau-leaping algorithm
        threshold : int or None
            Threshold parameter for tau-leaping algorithm

        Returns
        -------
        A :class:`SimulationResult` object
        """
        super(StochKitSimulator, self).run(tspan=tspan,
                                           initials=initials,
                                           param_values=param_values)

        self._logger.info('Running StochKit with {:d} parameter sets, '
                          '{:d} repeats ({:d} simulations total)'.format(
                              len(self.initials), n_runs,
                              len(self.initials) * n_runs))

        if output_dir is None:
            self._outdir = tempfile.mkdtemp()
        else:
            self._outdir = output_dir

        # Calculate time intervals and validate
        t_range = self.tspan[-1] - self.tspan[0]
        t_length = len(self.tspan)
        if not np.allclose(self.tspan, np.linspace(0, self.tspan[-1],
                                                   t_length)):
            raise SimulatorException('StochKit requires tspan to be linearly '
                                     'spaced starting at t=0')

        try:
            trajectories = self._run_stochkit(t=t_range,
                                              number_of_trajectories=n_runs,
                                              t_length=t_length,
                                              seed=seed,
                                              algorithm=algorithm,
                                              method=method,
                                              num_processors=num_processors,
                                              stats=stats,
                                              epsilon=epsilon,
                                              threshold=threshold)
        finally:
            if self.cleanup:
                try:
                    shutil.rmtree(self._outdir)
                except OSError:
                    pass

        # set output time points
        trajectories_array = np.array(trajectories)
        self.tout = trajectories_array[:, :, 0] + self.tspan[0]
        # species
        species = trajectories_array[:, :, 1:]
        return SimulationResult(self,
                                self.tout,
                                species,
                                simulations_per_param_set=n_runs)
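
A brief usage sketch (the StochKit binaries must be on the path; the seed makes the SSA realizations reproducible):

import numpy as np
from pysb.examples import robertson
from pysb.simulator.stochkit import StochKitSimulator

sim = StochKitSimulator(robertson.model, tspan=np.linspace(0, 10, 101))
# Five SSA realizations of the model's default parameter set.
res = sim.run(n_runs=5, algorithm='ssa', seed=123)
print(res.observables[0].dtype.names)  # observable names recorded per run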
Example #7
    def _run_stochkit(self,
                      t=20,
                      t_length=100,
                      number_of_trajectories=1,
                      seed=None,
                      algorithm='ssa',
                      method=None,
                      num_processors=1,
                      stats=False,
                      epsilon=None,
                      threshold=None):

        extra_args = '-p {:d}'.format(num_processors)

        # Random seed for stochastic simulation
        if seed is not None:
            extra_args += ' --seed {:d}'.format(seed)

        # Keep all the trajectories by default
        extra_args += ' --keep-trajectories'

        # Number of trajectories
        extra_args += ' --realizations {:d}'.format(number_of_trajectories)

        # We generally don't need the extra stats
        if not stats:
            extra_args += ' --no-stats'

        if method is not None:  # This only works for StochKit 2.1
            extra_args += ' --method {}'.format(method)

        if epsilon is not None:
            extra_args += ' --epsilon {:f}'.format(epsilon)

        if threshold is not None:
            extra_args += ' --threshold {:d}'.format(threshold)

        # Find binary for selected algorithm (SSA, Tau-leaping, ...)
        if algorithm not in ['ssa', 'tau_leaping']:
            raise SimulatorException(
                "algorithm must be 'ssa' or 'tau_leaping'")

        executable = get_path('stochkit_{}'.format(algorithm))

        # Output model file to directory
        fname = os.path.join(self._outdir, 'pysb.xml')

        trajectories = []
        for i in range(len(self.initials)):
            # We write all StochKit output files to a temporary folder
            prefix_outdir = os.path.join(self._outdir, 'output_{}'.format(i))

            # Export model file
            stoch_xml = StochKitExporter(self._model).export(
                self.initials[i], self.param_values[i])
            self._logger.log(EXTENDED_DEBUG, 'StochKit XML:\n' + stoch_xml)
            with open(fname, 'w') as f:
                f.write(stoch_xml)

            # Assemble the argument list
            args = '--model {} --out-dir {} -t {:f} -i {:d}'.format(
                fname, prefix_outdir, t, t_length - 1)

            # If we are using local mode, shell out and run StochKit
            # (SSA or Tau-leaping or ODE)
            cmd = '{} {} {}'.format(executable, args, extra_args)
            self._logger.debug("StochKit run {} of {} (cmd: {})".format(
                (i + 1), len(self.initials), cmd))

            # Execute
            try:
                handle = subprocess.Popen(cmd,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          shell=True)
                return_code = handle.wait()
            except OSError as e:
                raise SimulatorException("StochKit execution failed: \
                {0}\n{1}".format(cmd, e))

            try:
                stderr = handle.stderr.read()
            except Exception as e:
                stderr = 'Error reading stderr: {0}'.format(e)
            try:
                stdout = handle.stdout.read()
            except Exception as e:
                stdout = 'Error reading stdout: {0}'.format(e)

            if return_code != 0:
                raise SimulatorException("Solver execution failed: \
                '{0}' output:\nSTDOUT:\n{1}\nSTDERR:\n{2}".format(
                    cmd, stdout, stderr))

            traj_dir = os.path.join(prefix_outdir, 'trajectories')
            try:
                trajectories.extend([
                    np.loadtxt(os.path.join(traj_dir, f))
                    for f in sorted(os.listdir(traj_dir))
                ])
            except Exception as e:
                raise SimulatorException(
                    "Error reading StochKit trajectories: {0}"
                    "\nSTDOUT:{1}\nSTDERR:{2}".format(e, stdout, stderr))

            if len(trajectories) == 0 or len(stderr) != 0:
                raise SimulatorException("Solver execution failed: \
                '{0}' output:\nSTDOUT:\n{1}\nSTDERR:\n{2}".format(
                    cmd, stdout, stderr))

            self._logger.debug("StochKit STDOUT:\n{0}".format(stdout))

        # Return data
        return trajectories
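
For reference, with the defaults above (t=20, t_length=100, one processor, one realization, no stats) the shell command assembled by this method has the following shape; the paths shown are placeholders, not real output locations:

# Illustrative only -- the StochKit invocation built from args + extra_args:
cmd = ("/path/to/stochkit_ssa --model /tmp/outdir/pysb.xml "
       "--out-dir /tmp/outdir/output_0 -t 20.000000 -i 99 "
       "-p 1 --keep-trajectories --realizations 1 --no-stats")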
Example #8
    def run(self, tspan=None, initials=None, param_values=None):
        """Perform a set of integrations.

        Returns a :class:`.SimulationResult` object.

        Parameters
        ----------
        tspan : list-like, optional
            Time values at which the integrations are sampled. The first and
            last values define the time range.
        initials : list-like, optional
            Initial species concentrations for all simulations. Dimensions are
            number of simulations x number of species.
        param_values : list-like, optional
            Parameters for all simulations. Dimensions are number of
            simulations x number of parameters.

        Returns
        -------
        A :class:`SimulationResult` object

        Notes
        -----
        1. An exception is thrown if `tspan` is not defined in either
           `__init__` or `run`.

        2. If neither `initials` nor `param_values` is defined in either
           `__init__` or `run`, a single simulation is run with the initial
           concentrations and parameter values defined in the model.

        """
        super(CupSodaSimulator, self).run(tspan=tspan,
                                          initials=initials,
                                          param_values=param_values,
                                          _run_kwargs=[])

        # Create directories for cupSODA input and output files
        self.outdir = tempfile.mkdtemp(prefix=self._prefix + '_',
                                       dir=self._base_dir)

        self._logger.debug("Output directory is %s" % self.outdir)
        _cupsoda_infiles_dir = os.path.join(self.outdir, "INPUT")
        os.mkdir(_cupsoda_infiles_dir)
        self._cupsoda_outfiles_dir = os.path.join(self.outdir, "OUTPUT")

        # Path to cupSODA executable
        bin_path = get_path('cupsoda')

        # Create cupSODA input files
        self._create_input_files(_cupsoda_infiles_dir)

        # Build command
        # ./cupSODA input_model_folder blocks output_folder simulation_
        # file_prefix gpu_number fitness_calculation memory_use dump
        command = [
            bin_path, _cupsoda_infiles_dir,
            str(self.n_blocks), self._cupsoda_outfiles_dir, self._prefix,
            str(self.gpu), '0', self._memory_usage,
            str(self._cupsoda_verbose)
        ]

        self._logger.info("Running cupSODA: " + ' '.join(command))
        start_time = time.time()
        # Run simulation and return trajectories
        p = subprocess.Popen(command,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # for line in iter(p.stdout.readline, b''):
        #     if 'Running time' in line:
        #         self._logger.info(line[:-1])
        (p_out, p_err) = p.communicate()
        p_out = p_out.decode('utf-8')
        p_err = p_err.decode('utf-8')
        logger_level = self._logger.logger.getEffectiveLevel()
        if logger_level <= logging.INFO:
            run_time_match = self._running_time_regex.search(p_out)
            if run_time_match:
                self._logger.info('cupSODA reported time: {} '
                                  'seconds'.format(run_time_match.group(1)))
        self._logger.debug('cupSODA stdout:\n' + p_out)
        if p_err:
            self._logger.error('cupSODA stderr:\n' + p_err)
        if p.returncode:
            raise SimulatorException(
                p_out.rstrip("at line") + "\n" + p_err.rstrip())
        tout, trajectories = self._load_trajectories(
            self._cupsoda_outfiles_dir)
        if self._cleanup:
            shutil.rmtree(self.outdir)
        end_time = time.time()
        self._logger.info("cupSODA + I/O time: {} seconds".format(end_time -
                                                                  start_time))
        return SimulationResult(self, tout, trajectories)
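
An end-to-end sketch using this run method (a CUDA GPU and the cupSODA binary are assumed; ten copies of the default parameter set are run purely to illustrate batching):

import numpy as np
from pysb.examples import robertson
from pysb.simulator.cupsoda import CupSodaSimulator

model = robertson.model
sim = CupSodaSimulator(model, tspan=np.linspace(0, 100, 101))
# Ten simulations sharing the model's default parameter values.
param_values = np.repeat([[p.value for p in model.parameters]], 10, axis=0)
res = sim.run(param_values=param_values)
print(res.dataframe.head())  # tabulated species/observable trajectories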
Example #9
    def run(self,
            tspan=None,
            initials=None,
            param_values=None,
            n_runs=1,
            output_dir=None,
            output_file_basename=None,
            cleanup=None,
            **additional_args):
        """
        Simulate a model using Kappa

        Parameters
        ----------
        tspan: vector-like
            time span of simulation
        initials: vector-like, optional
            initial conditions of model
        param_values : vector-like or dictionary, optional
            Values to use for every parameter in the model. Ordering is
            determined by the order of model.parameters.
            If not specified, parameter values will be taken directly from
            model.parameters.
        n_runs: int
            number of simulations to run
        output_dir : string, optional
            Location for temporary files generated by Kappa. If None (the
            default), uses a temporary directory provided by the system. A
            temporary directory with a random name is created within the
            supplied location.
        output_file_basename : string, optional
            This argument is used as a prefix for the temporary Kappa
            output directory, rather than the individual files.
        cleanup : bool, optional
            If True (default), delete the temporary files after the
            simulation is finished. If False, leave them in place (Useful for
            debugging). The default value, None, means to use the value
            specified in :py:func:`__init__`.
        additional_args: kwargs, optional
            Additional arguments to pass to Kappa

                * seed : int, optional
                    Random number seed for Kappa simulation

                * perturbation : string, optional
                    Optional perturbation language syntax to be appended to the
                    Kappa file. See KaSim manual for more details.

        Examples
        --------

        >>> import numpy as np
        >>> from pysb.examples import michment
        >>> from pysb.simulator import KappaSimulator
        >>> sim = KappaSimulator(michment.model, tspan=np.linspace(0, 1))
        >>> x = sim.run(n_runs=1)


        """
        super(KappaSimulator, self).run(tspan=tspan,
                                        initials=initials,
                                        param_values=param_values,
                                        _run_kwargs=locals())

        if cleanup is None:
            cleanup = self.cleanup

        tspan_lin_spaced = np.allclose(
            self.tspan,
            np.linspace(self.tspan[0], self.tspan[-1], len(self.tspan)))
        if not tspan_lin_spaced or self.tspan[0] != 0.0:
            raise SimulatorException('Kappa requires tspan to be linearly '
                                     'spaced starting at t=0')
        points = len(self.tspan)
        time = self.tspan[-1]
        plot_period = time / (len(self.tspan) - 1)

        if output_file_basename is None:
            output_file_basename = 'tmpKappa_%s_' % self.model.name

        base_directory = tempfile.mkdtemp(prefix=output_file_basename,
                                          dir=output_dir)

        base_filename = os.path.join(base_directory, self.model.name)
        kappa_filename_pattern = base_filename + '_{}.ka'
        out_filename_pattern = base_filename + '_{}_run{}.out'

        base_args = ['-u', 'time', '-l', str(time), '-p', '%.5f' % plot_period]
        if 'seed' in additional_args:
            seed = additional_args.pop('seed')
            base_args.extend(['-seed', str(seed)])

        kasim_path = pf.get_path('kasim')
        n_param_sets = self.initials_length

        gen = KappaGenerator(self.model, _exclude_ic_param=True)
        file_data_base = gen.get_content()

        # Check if a perturbation has been set
        try:
            perturbation = additional_args.pop('perturbation')
        except KeyError:
            perturbation = None

        # Check no unknown arguments have been set
        if additional_args:
            raise ValueError('Unknown argument(s): {}'.format(', '.join(
                additional_args.keys())))

        # Kappa column names, for sanity check
        kappa_col_names = tuple(['time'] +
                                [o.name for o in self.model.observables])
        tout = []
        observable_traj = []
        try:
            for pset_idx in range(n_param_sets):
                file_data = file_data_base + ''
                for param, param_value in zip(self.model.parameters,
                                              self.param_values[pset_idx]):
                    file_data += "%var: '{}' {:e}\n".format(
                        param.name, param_value)
                file_data += '\n'
                for cp, values in self.initials_dict.items():
                    file_data += "%init: {} {}\n".format(
                        values[pset_idx], gen.format_complexpattern(cp))

                # If any perturbation language code has been passed in, add it
                # to the Kappa file:
                if perturbation:
                    file_data += '%s\n' % perturbation

                # Generate the Kappa model code from the PySB model and write
                # it to the Kappa file:
                kappa_filename = kappa_filename_pattern.format(pset_idx)
                with open(kappa_filename, 'w') as kappa_file:
                    self._logger.debug('Kappa file contents:\n\n' + file_data)
                    kappa_file.write(file_data)

                for sim_rpt in range(n_runs):
                    # Run Kappa
                    out_filename = out_filename_pattern.format(
                        pset_idx, sim_rpt)
                    args = [kasim_path] + base_args + [
                        '-i', kappa_filename, '-o', out_filename
                    ]

                    # Run KaSim
                    self._logger.debug('Running: {}'.format(' '.join(args)))
                    p = subprocess.Popen(args,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         cwd=base_directory)

                    for line in p.stdout:
                        self._logger.debug('@@' + line.decode('utf8')[:-1])
                    (p_out, p_err) = p.communicate()

                    if p.returncode:
                        raise KasimInterfaceError(
                            p_out.decode('utf8') + '\n' + p_err.decode('utf8'))

                    # The simulation data, as a numpy array
                    data = _parse_kasim_outfile(out_filename)
                    # Sanity check that observables are in correct order
                    assert data.dtype.names == kappa_col_names
                    data = data.view('<f8')
                    # Handle case with single row output
                    if data.ndim == 1:
                        data.shape = (1, data.shape[0])
                    # Parse into format
                    tout.append(data[:, 0])
                    observable_traj.append(data[:, 1:])
        finally:
            if cleanup:
                shutil.rmtree(base_directory)

        return SimulationResult(self,
                                tout=tout,
                                observables_and_expressions=observable_traj,
                                simulations_per_param_set=n_runs)
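
The docstring example above uses the defaults; this sketch also exercises the seed keyword documented under additional_args (KaSim must be installed; a perturbation string could be passed the same way):

import numpy as np
from pysb.examples import michment
from pysb.simulator import KappaSimulator

sim = KappaSimulator(michment.model, tspan=np.linspace(0, 1, 101))
# Two reproducible KaSim runs of the default parameter set.
res = sim.run(n_runs=2, seed=42)
print(res.observables[0].dtype.names)  # observable names from the model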