Code Example #1
File: test_importers.py Project: ryants/pysb
def _bngl_location(filename):
    """
    Gets the location of one of BioNetGen's validation model files in BNG's
    Validate directory.
    """
    bng_dir = os.path.dirname(pf.get_path('bng'))
    bngl_file = os.path.join(bng_dir, 'Validate', filename + '.bngl')
    return bngl_file
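A minimal usage sketch for this helper, assuming `pf` is `pysb.pathfinder`, BioNetGen is discoverable on the path, and 'CONTINUE' is a hypothetical validation model name:

import os
from pysb import pathfinder as pf

# Hypothetical model name; any .bngl file under BNG's Validate directory works.
model_path = _bngl_location('CONTINUE')
assert model_path.endswith(os.path.join('Validate', 'CONTINUE.bngl'))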
Code Example #2
File: test_importers.py Project: jmuhlich/pysb
def _bng_validate_directory():
    """ Location of BNG's validation models directory"""
    bng_exec = os.path.realpath(pf.get_path('bng'))
    if bng_exec.endswith('.bat'):
        conda_prefix = os.environ.get('CONDA_PREFIX')
        if conda_prefix:
            return os.path.join(conda_prefix, 'share\\bionetgen\\Validate')

    return os.path.join(os.path.dirname(bng_exec), 'Validate')
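The `.bat` branch covers conda-packaged BioNetGen on Windows, where the entry point is a wrapper script that does not sit next to the Validate directory. A hedged usage sketch (the model filename is hypothetical):

import os

validate_dir = _bng_validate_directory()
bngl_file = os.path.join(validate_dir, 'CONTINUE.bngl')  # hypothetical model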
Code Example #3
File: test_importers.py Project: ryants/pysb
def _sbml_location(filename):
    """
    Gets the location of one of BioNetGen's validation SBML files in BNG's
    Validate/INPUT_FILES directory.
    """
    bng_dir = os.path.dirname(pf.get_path('bng'))
    sbml_file = os.path.join(bng_dir, 'Validate/INPUT_FILES',
                             filename + '.xml')
    return sbml_file
Code Example #4
File: test_importers.py Project: zhwycsz/pysb
def _bng_validate_directory():
    """ Location of BNG's validation models directory"""
    bng_exec = os.path.realpath(pf.get_path('bng'))
    if bng_exec.endswith('.bat'):
        conda_prefix = os.environ.get('CONDA_PREFIX')
        if conda_prefix:
            return os.path.join(conda_prefix, 'share\\bionetgen\\Validate')

    return os.path.join(os.path.dirname(bng_exec), 'Validate')
Code Example #5
    def execute(self, reload_netfile=False, skip_file_actions=True):
        """
        Executes all BNG commands in the command queue.

        Parameters
        ----------
        reload_netfile: bool or str
            If true, attempts to reload an existing .net file from a
            previous execute() iteration. If a string, the filename
            specified in the string is supplied to BNG's readFile (which can be
            any file type BNG supports, such as .net or .bngl).
            This is useful for running multiple actions in a row,
            where results need to be read into PySB before a new series of
            actions is executed.
        skip_file_actions: bool
            Only used if `reload_netfile` is not False. Set to True to
            ignore any actions block in the loaded file.
        """
        self.command_queue.write('end actions\n')
        bng_commands = self.command_queue.getvalue()

        # Generate BNGL file
        with open(self.bng_filename, 'w') as bng_file:
            output = ''
            if self.model and not reload_netfile:
                output += self.generator.get_content()
            if reload_netfile:
                filename = reload_netfile if \
                    isinstance(reload_netfile, basestring) \
                    else self.net_filename
                output += '\n  readFile({file=>"%s",skip_actions=>%d})\n' \
                    % (filename, int(skip_file_actions))
            output += bng_commands
            self._logger.debug('BNG command file contents:\n\n' + output)
            bng_file.write(output)

        # Reset the command queue, in case execute() is called again
        self.command_queue.close()
        self._init_command_queue()

        bng_exec_args = [pf.get_path('bng'), self.bng_filename]
        if not bng_exec_args[0].endswith('.bat'):
            bng_exec_args.insert(0, 'perl')

        p = subprocess.Popen(bng_exec_args,
                             cwd=self.base_directory,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        for line in iter(p.stdout.readline, b''):
            self._logger.debug(line[:-1])
        (p_out, p_err) = p.communicate()
        p_out = p_out.decode('utf-8')
        p_err = p_err.decode('utf-8')
        if p.returncode:
            raise BngInterfaceError(
                p_out.rstrip("at line") + "\n" + p_err.rstrip())
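A hedged sketch of the two-phase pattern the `reload_netfile` docstring describes, assuming this method belongs to `pysb.bng.BngFileInterface` and that `action()` queues BNG commands; the specific action arguments are illustrative:

from pysb.bng import BngFileInterface
from pysb.examples.robertson import model

with BngFileInterface(model) as bng:
    bng.action('generate_network', overwrite=True)
    bng.execute()                     # first pass: generate and cache the .net file
    bng.action('simulate', method='ode', t_end=10, n_steps=100)
    bng.execute(reload_netfile=True)  # second pass: reuse the cached network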
Code Example #6
    def __init__(self,
                 model=None,
                 verbose=False,
                 cleanup=True,
                 output_dir=None,
                 output_prefix=None,
                 timeout=30,
                 suppress_warnings=False,
                 model_additional_species=None):
        super(BngConsole,
              self).__init__(model,
                             verbose,
                             cleanup,
                             output_prefix,
                             output_dir,
                             model_additional_species=model_additional_species)

        try:
            import pexpect
        except ImportError:
            raise ImportError("Library 'pexpect' is required to use "
                              "BNGConsole, please install it to continue.\n"
                              "It is not currently available on Windows.")

        if suppress_warnings:
            warn(
                "suppress_warnings is deprecated and has no effect. Adjust "
                "the log level with the verbose argument instead.",
                category=DeprecationWarning,
                stacklevel=2)

        # Generate BNGL file
        if self.model:
            with open(self.bng_filename, mode='w') as bng_file:
                bng_file.write(self.generator.get_content())

        # Start BNG Console and load BNGL
        bng_path = pf.get_path('bng')
        bng_exec_path = '%s --console' % bng_path
        if not bng_path.endswith('.bat'):
            bng_exec_path = 'perl %s' % bng_exec_path
        self.console = pexpect.spawn(bng_exec_path,
                                     cwd=self.base_directory,
                                     timeout=timeout)
        self._console_wait()
        if self.model:
            self.console.sendline('load %s' % self.bng_filename)
            self._console_wait()
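A hedged construction sketch for this console interface (POSIX only, since pexpect is unavailable on Windows, as the ImportError message notes). Per the code above, constructing the object spawns BNG in console mode and loads the generated BNGL file:

from pysb.bng import BngConsole
from pysb.examples.robertson import model

bng = BngConsole(model, timeout=60)  # spawns the console and loads the BNGL file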
Code Example #7
File: bng.py Project: LoLab-VU/pysb
    def __init__(self, model=None, verbose=False, cleanup=True,
                 output_dir=None, output_prefix=None, timeout=30,
                 suppress_warnings=False, model_additional_species=None):
        super(BngConsole, self).__init__(
            model, verbose, cleanup, output_prefix, output_dir,
            model_additional_species=model_additional_species
        )

        try:
            import pexpect
        except ImportError:
            raise ImportError("Library 'pexpect' is required to use "
                              "BNGConsole, please install it to continue.\n"
                              "It is not currently available on Windows.")

        if suppress_warnings:
            warn("suppress_warnings is deprecated and has no effect. Adjust "
                 "the log level with the verbose argument instead.",
                 category=DeprecationWarning,
                 stacklevel=2)

        # Generate BNGL file
        if self.model:
            with open(self.bng_filename, mode='w') as bng_file:
                bng_file.write(self.generator.get_content())

        # Start BNG Console and load BNGL
        bng_path = pf.get_path('bng')
        bng_exec_path = '%s --console' % bng_path
        if not bng_path.endswith('.bat'):
            bng_exec_path = 'perl %s' % bng_exec_path
        self.console = pexpect.spawn(bng_exec_path,
                                     cwd=self.base_directory,
                                     timeout=timeout)
        self._console_wait()
        if self.model:
            self.console.sendline('load %s' % self.bng_filename)
            self._console_wait()
Code Example #8
    def run(self, tspan=None, initials=None, param_values=None):
        """Perform a set of integrations.

        Returns a :class:`.SimulationResult` object.

        Parameters
        ----------
        tspan : list-like, optional
            Time values at which the integrations are sampled. The first and
            last values define the time range.
        initials : list-like, optional
            Initial species concentrations for all simulations. Dimensions are
            number of simulations x number of species.
        param_values : list-like, optional
            Parameters for all simulations. Dimensions are number of
            simulations x number of parameters.

        Returns
        -------
        A :class:`SimulationResult` object

        Notes
        -----
        1. An exception is thrown if `tspan` is not defined in either
           `__init__` or `run`.

        2. If neither `initials` nor `param_values` is defined in either
           `__init__` or `run`, a single simulation is run with the initial
           concentrations and parameter values defined in the model.

        """
        super(CupSodaSimulator, self).run(tspan=tspan,
                                          initials=initials,
                                          param_values=param_values,
                                          _run_kwargs=[])

        # Create directories for cupSODA input and output files
        self.outdir = tempfile.mkdtemp(prefix=self._prefix + '_',
                                       dir=self._base_dir)

        self._logger.debug("Output directory is %s" % self.outdir)
        _cupsoda_infiles_dir = os.path.join(self.outdir, "INPUT")
        os.mkdir(_cupsoda_infiles_dir)
        self._cupsoda_outfiles_dir = os.path.join(self.outdir, "OUTPUT")

        # Path to cupSODA executable
        bin_path = get_path('cupsoda')

        # Create cupSODA input files
        self._create_input_files(_cupsoda_infiles_dir)

        # Build command
        # ./cupSODA input_model_folder blocks output_folder simulation_
        # file_prefix gpu_number fitness_calculation memory_use dump
        command = [
            bin_path, _cupsoda_infiles_dir,
            str(self.n_blocks), self._cupsoda_outfiles_dir, self._prefix,
            str(self.gpu), '0', self._memory_usage,
            str(self._cupsoda_verbose)
        ]

        self._logger.info("Running cupSODA: " + ' '.join(command))
        start_time = time.time()
        # Run simulation and return trajectories
        p = subprocess.Popen(command,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # for line in iter(p.stdout.readline, b''):
        #     if 'Running time' in line:
        #         self._logger.info(line[:-1])
        (p_out, p_err) = p.communicate()
        p_out = p_out.decode('utf-8')
        p_err = p_err.decode('utf-8')
        logger_level = self._logger.logger.getEffectiveLevel()
        if logger_level <= logging.INFO:
            run_time_match = self._running_time_regex.search(p_out)
            if run_time_match:
                self._logger.info('cupSODA reported time: {} '
                                  'seconds'.format(run_time_match.group(1)))
        self._logger.debug('cupSODA stdout:\n' + p_out)
        if p_err:
            self._logger.error('cupSODA stderr:\n' + p_err)
        if p.returncode:
            raise SimulatorException(
                p_out.rstrip("at line") + "\n" + p_err.rstrip())
        tout, trajectories = self._load_trajectories(
            self._cupsoda_outfiles_dir)
        if self._cleanup:
            shutil.rmtree(self.outdir)
        end_time = time.time()
        self._logger.info("cupSODA + I/O time: {} seconds".format(end_time -
                                                                  start_time))
        return SimulationResult(self, tout, trajectories)
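A hedged usage sketch for this `run()` method, assuming a CUDA-capable GPU and the cupSODA binary on PySB's search path; the parameter scan values are illustrative:

import numpy as np
from pysb.examples.robertson import model
from pysb.simulator import CupSodaSimulator

tspan = np.linspace(0, 100, 101)
sim = CupSodaSimulator(model, tspan=tspan)

# param_values dimensions: n_simulations x n_parameters
pvals = np.repeat([[p.value for p in model.parameters]], 10, axis=0)
pvals[:, 0] *= np.linspace(0.5, 1.5, 10)  # scan the first parameter
res = sim.run(param_values=pvals)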
Code Example #9
from pysb import *
from pysb import pathfinder

# this is your pythonanywhere.com username
user_name = 'rah'

bngl_path = '/home/' + str(user_name) + '/BioNetGen-2.3.1/'
pathfinder.set_path('bng', bngl_path)
pathfinder.get_path('bng')

Model()

# Physical and geometric constants
Parameter('NA', 6.0e23)  # Avogadro's number
Parameter('f', 0.01)  # scaling factor
Expression('Vo', f * 1e-10)  # L
Expression('V', f * 3e-12)  # L

# Initial concentrations
Parameter('EGF_conc', 2e-9)  # M (i.e. 2 nM)
Expression('EGF0', EGF_conc * NA * Vo)  # number of molecules
Expression('EGFR0', f * 1.8e5)  # copies per cell

# Rate constants
Expression('kp1', 9.0e7 / (NA * Vo))  # input /M/sec
Parameter('km1', 0.06)  # /sec

Monomer('EGF', ['R'])
Monomer('EGFR', ['L', 'CR1', 'Y1068'], {'Y1068': ['U', 'P']})

Initial(EGF(R=None), EGF0)
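The script above stops partway through the model definition. A hedged sketch of how it could be simulated once the remaining initials, rules, and observables are declared, using PySB's BioNetGen-backed simulator:

import numpy as np
from pysb.simulator import BngSimulator

sim = BngSimulator(model, tspan=np.linspace(0, 10, 101))
res = sim.run()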
Code Example #10
    def _run_chunk(self, gpus, outdir, chunk_idx, cmtx, sims, trajectories,
                   tout):
        _indirs = {}
        _outdirs = {}
        p = {}

        # Path to cupSODA executable
        bin_path = get_path('cupsoda')

        # Start simulations
        for gpu in gpus:
            _indirs[gpu] = os.path.join(
                outdir, "INPUT_GPU{}_{}".format(gpu, chunk_idx))
            os.mkdir(_indirs[gpu])
            _outdirs[gpu] = os.path.join(
                outdir, "OUTPUT_GPU{}_{}".format(gpu, chunk_idx))

            # Create cupSODA input files
            self._create_input_files(_indirs[gpu], sims[gpu], cmtx)

            # Build command
            # ./cupSODA input_model_folder blocks output_folder simulation_
            # file_prefix gpu_number fitness_calculation memory_use dump
            command = [
                bin_path, _indirs[gpu],
                str(self.n_blocks), _outdirs[gpu], self._prefix,
                str(gpu), '0', self._memory_usage,
                str(self._cupsoda_verbose)
            ]

            self._logger.info("Running cupSODA: " + ' '.join(command))

            # Run simulation and return trajectories
            p[gpu] = subprocess.Popen(command,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)

        # Read results
        for gpu in gpus:
            (p_out, p_err) = p[gpu].communicate()
            p_out = p_out.decode('utf-8')
            p_err = p_err.decode('utf-8')
            logger_level = self._logger.logger.getEffectiveLevel()
            if logger_level <= logging.INFO:
                run_time_match = self._running_time_regex.search(p_out)
                if run_time_match:
                    self._logger.info('cupSODA GPU {} chunk {} reported '
                                      'time: {} seconds'.format(
                                          gpu, chunk_idx,
                                          run_time_match.group(1)))
            self._logger.debug('cupSODA GPU {} chunk {} stdout:\n{}'.format(
                gpu, chunk_idx, p_out))
            if p_err:
                self._logger.error('cupSODA GPU {} chunk {} '
                                   'stderr:\n{}'.format(gpu, chunk_idx, p_err))
            if p[gpu].returncode:
                raise SimulatorException(
                    "cupSODA GPU {} chunk {} exception:\n{}\n{}".format(
                        gpu, chunk_idx, p_out.rstrip("at line"),
                        p_err.rstrip()))
            tout_run, trajectories_run = self._load_trajectories(
                _outdirs[gpu], sims[gpu])
            if trajectories is None:
                tout = tout_run
                trajectories = trajectories_run
            else:
                tout = np.concatenate((tout, tout_run))
                trajectories = np.concatenate((trajectories, trajectories_run))

        return tout, trajectories
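For clarity, a hypothetical expansion of the `command` list assembled above for GPU 0, chunk 0 (all paths and values are illustrative):

command = ['/usr/local/bin/cupsoda',   # bin_path
           '/tmp/run/INPUT_GPU0_0',    # cupSODA input files directory
           '64',                       # n_blocks
           '/tmp/run/OUTPUT_GPU0_0',   # output directory
           'model',                    # simulation file prefix
           '0',                        # GPU number
           '0',                        # fitness calculation (unused here)
           '0',                        # memory use mode
           '1']                        # verbosity/dump flag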
Code Example #11
File: cupsoda.py Project: LoLab-VU/pysb
    def _run_chunk(self, gpus, outdir, chunk_idx, cmtx, sims, trajectories,
                   tout):
        _indirs = {}
        _outdirs = {}
        p = {}

        # Path to cupSODA executable
        bin_path = get_path('cupsoda')

        # Start simulations
        for gpu in gpus:
            _indirs[gpu] = os.path.join(outdir, "INPUT_GPU{}_{}".format(
                gpu, chunk_idx))
            os.mkdir(_indirs[gpu])
            _outdirs[gpu] = os.path.join(outdir, "OUTPUT_GPU{}_{}".format(
                gpu, chunk_idx))

            # Create cupSODA input files
            self._create_input_files(_indirs[gpu], sims[gpu], cmtx)

            # Build command
            # ./cupSODA input_model_folder blocks output_folder simulation_
            # file_prefix gpu_number fitness_calculation memory_use dump
            command = [bin_path, _indirs[gpu], str(self.n_blocks),
                       _outdirs[gpu], self._prefix, str(gpu),
                       '0', self._memory_usage, str(self._cupsoda_verbose)]

            self._logger.info("Running cupSODA: " + ' '.join(command))

            # Run simulation and return trajectories
            p[gpu] = subprocess.Popen(command, stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)

        # Read results
        for gpu in gpus:
            (p_out, p_err) = p[gpu].communicate()
            p_out = p_out.decode('utf-8')
            p_err = p_err.decode('utf-8')
            logger_level = self._logger.logger.getEffectiveLevel()
            if logger_level <= logging.INFO:
                run_time_match = self._running_time_regex.search(p_out)
                if run_time_match:
                    self._logger.info('cupSODA GPU {} chunk {} reported '
                                      'time: {} seconds'.format(
                        gpu,
                        chunk_idx,
                        run_time_match.group(1)))
            self._logger.debug('cupSODA GPU {} chunk {} stdout:\n{}'.format(
                gpu, chunk_idx, p_out))
            if p_err:
                self._logger.error('cupSODA GPU {} chunk {} '
                                   'stderr:\n{}'.format(
                    gpu, chunk_idx, p_err))
            if p[gpu].returncode:
                raise SimulatorException(
                    "cupSODA GPU {} chunk {} exception:\n{}\n{}".format(
                        gpu, chunk_idx, p_out.rstrip("at line"), p_err.rstrip()
                    )
                )
            tout_run, trajectories_run = self._load_trajectories(
                _outdirs[gpu], sims[gpu])
            if trajectories is None:
                tout = tout_run
                trajectories = trajectories_run
            else:
                tout = np.concatenate((tout, tout_run))
                trajectories = np.concatenate(
                    (trajectories, trajectories_run))

        return tout, trajectories
Code Example #12
File: stochkit.py Project: shf43/pysb
    def _run_stochkit(self,
                      t=20,
                      t_length=100,
                      number_of_trajectories=1,
                      seed=None,
                      algorithm='ssa',
                      method=None,
                      num_processors=1,
                      stats=False,
                      epsilon=None,
                      threshold=None):

        extra_args = '-p {:d}'.format(num_processors)

        # Random seed for stochastic simulation
        if seed is not None:
            extra_args += ' --seed {:d}'.format(seed)

        # Keep all the trajectories by default
        extra_args += ' --keep-trajectories'

        # Number of trajectories
        extra_args += ' --realizations {:d}'.format(number_of_trajectories)

        # We generally don't need the extra stats
        if not stats:
            extra_args += ' --no-stats'

        if method is not None:  # This only works for StochKit 2.1
            extra_args += ' --method {}'.format(method)

        if epsilon is not None:
            extra_args += ' --epsilon {:f}'.format(epsilon)

        if threshold is not None:
            extra_args += ' --threshold {:d}'.format(threshold)

        # Find binary for selected algorithm (SSA, Tau-leaping, ...)
        if algorithm not in ['ssa', 'tau_leaping']:
            raise SimulatorException(
                "algorithm must be 'ssa' or 'tau_leaping'")

        executable = get_path('stochkit_{}'.format(algorithm))

        # Output model file to directory
        fname = os.path.join(self._outdir, 'pysb.xml')

        trajectories = []
        for i in range(len(self.initials)):
            # We write all StochKit output files to a temporary folder
            prefix_outdir = os.path.join(self._outdir, 'output_{}'.format(i))

            # Export model file
            stoch_xml = StochKitExporter(self._model).export(
                self.initials[i], self.param_values[i])
            self._logger.log(EXTENDED_DEBUG, 'StochKit XML:\n' + stoch_xml)
            with open(fname, 'w') as f:
                f.write(stoch_xml)

            # Assemble the argument list
            args = '--model {} --out-dir {} -t {:f} -i {:d}'.format(
                fname, prefix_outdir, t, t_length - 1)

            # If we are using local mode, shell out and run StochKit
            # (SSA or Tau-leaping or ODE)
            cmd = '{} {} {}'.format(executable, args, extra_args)
            self._logger.debug("StochKit run {} of {} (cmd: {})".format(
                (i + 1), len(self.initials), cmd))

            # Execute
            try:
                handle = subprocess.Popen(cmd,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          shell=True)
                return_code = handle.wait()
            except OSError as e:
                raise SimulatorException("StochKit execution failed: \
                {0}\n{1}".format(cmd, e))

            try:
                stderr = handle.stderr.read()
            except Exception as e:
                stderr = 'Error reading stderr: {0}'.format(e)
            try:
                stdout = handle.stdout.read()
            except Exception as e:
                stdout = 'Error reading stdout: {0}'.format(e)

            if return_code != 0:
                raise SimulatorException("Solver execution failed: \
                '{0}' output:\nSTDOUT:\n{1}\nSTDERR:\n{2}".format(
                    cmd, stdout, stderr))

            traj_dir = os.path.join(prefix_outdir, 'trajectories')
            try:
                trajectories.extend([
                    np.loadtxt(os.path.join(traj_dir, f))
                    for f in sorted(os.listdir(traj_dir))
                ])
            except Exception as e:
                raise SimulatorException(
                    "Error reading StochKit trajectories: {0}"
                    "\nSTDOUT:{1}\nSTDERR:{2}".format(e, stdout, stderr))

            if len(trajectories) == 0 or len(stderr) != 0:
                raise SimulatorException("Solver execution failed: \
                '{0}' output:\nSTDOUT:\n{1}\nSTDERR:\n{2}".format(
                    cmd, stdout, stderr))

            self._logger.debug("StochKit STDOUT:\n{0}".format(stdout))

        # Return data
        return trajectories
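`_run_stochkit` is a private helper; a hedged sketch of the public simulator interface that wraps it, assuming the StochKit binaries are on PySB's search path:

import numpy as np
from pysb.examples.robertson import model
from pysb.simulator import StochKitSimulator

sim = StochKitSimulator(model, tspan=np.linspace(0, 20, 101))
res = sim.run(n_runs=5, seed=123, algorithm='ssa')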
Code Example #13
File: kappa.py Project: zhwycsz/pysb
    def run(self,
            tspan=None,
            initials=None,
            param_values=None,
            n_runs=1,
            output_dir=None,
            output_file_basename=None,
            cleanup=None,
            **additional_args):
        """
        Simulate a model using Kappa

        Parameters
        ----------
        tspan: vector-like
            time span of simulation
        initials: vector-like, optional
            initial conditions of model
        param_values : vector-like or dictionary, optional
            Values to use for every parameter in the model. Ordering is
            determined by the order of model.parameters.
            If not specified, parameter values will be taken directly from
            model.parameters.
        n_runs: int
            number of simulations to run
        output_dir : string, optional
            Location for temporary files generated by Kappa. If None (the
            default), uses a temporary directory provided by the system. A
            temporary directory with a random name is created within the
            supplied location.
        output_file_basename : string, optional
            This argument is used as a prefix for the temporary Kappa
            output directory, rather than the individual files.
        cleanup : bool, optional
            If True (default), delete the temporary files after the
            simulation is finished. If False, leave them in place (Useful for
            debugging). The default value, None, means to use the value
            specified in :py:func:`__init__`.
        additional_args: kwargs, optional
            Additional arguments to pass to Kappa

                * seed : int, optional
                    Random number seed for Kappa simulation

                * perturbation : string, optional
                    Optional perturbation language syntax to be appended to the
                    Kappa file. See KaSim manual for more details.

        Examples
        --------

        >>> import numpy as np
        >>> from pysb.examples import michment
        >>> from pysb.simulator import KappaSimulator
        >>> sim = KappaSimulator(michment.model, tspan=np.linspace(0, 1))
        >>> x = sim.run(n_runs=1)


        """
        super(KappaSimulator, self).run(tspan=tspan,
                                        initials=initials,
                                        param_values=param_values,
                                        _run_kwargs=locals())

        if cleanup is None:
            cleanup = self.cleanup

        tspan_lin_spaced = np.allclose(
            self.tspan,
            np.linspace(self.tspan[0], self.tspan[-1], len(self.tspan)))
        if not tspan_lin_spaced or self.tspan[0] != 0.0:
            raise SimulatorException('Kappa requires tspan to be linearly '
                                     'spaced starting at t=0')
        points = len(self.tspan)
        time = self.tspan[-1]
        plot_period = time / (len(self.tspan) - 1)

        if output_file_basename is None:
            output_file_basename = 'tmpKappa_%s_' % self.model.name

        base_directory = tempfile.mkdtemp(prefix=output_file_basename,
                                          dir=output_dir)

        base_filename = os.path.join(base_directory, self.model.name)
        kappa_filename_pattern = base_filename + '_{}.ka'
        out_filename_pattern = base_filename + '_{}_run{}.out'

        base_args = ['-u', 'time', '-l', str(time), '-p', '%.5f' % plot_period]
        if 'seed' in additional_args:
            seed = additional_args.pop('seed')
            base_args.extend(['-seed', str(seed)])

        kasim_path = pf.get_path('kasim')
        n_param_sets = self.initials_length

        gen = KappaGenerator(self.model, _exclude_ic_param=True)
        file_data_base = gen.get_content()

        # Check if a perturbation has been set
        try:
            perturbation = additional_args.pop('perturbation')
        except KeyError:
            perturbation = None

        # Check no unknown arguments have been set
        if additional_args:
            raise ValueError('Unknown argument(s): {}'.format(', '.join(
                additional_args.keys())))

        # Kappa column names, for sanity check
        kappa_col_names = tuple(['time'] +
                                [o.name for o in self.model.observables])
        tout = []
        observable_traj = []
        try:
            for pset_idx in range(n_param_sets):
                file_data = file_data_base + ''
                for param, param_value in zip(self.model.parameters,
                                              self.param_values[pset_idx]):
                    file_data += "%var: '{}' {:e}\n".format(
                        param.name, param_value)
                file_data += '\n'
                for cp, values in self.initials_dict.items():
                    file_data += "%init: {} {}\n".format(
                        values[pset_idx], gen.format_complexpattern(cp))

                # If any perturbation language code has been passed in, add it
                # to the Kappa file:
                if perturbation:
                    file_data += '%s\n' % perturbation

                # Generate the Kappa model code from the PySB model and write
                # it to the Kappa file:
                kappa_filename = kappa_filename_pattern.format(pset_idx)
                with open(kappa_filename, 'w') as kappa_file:
                    self._logger.debug('Kappa file contents:\n\n' + file_data)
                    kappa_file.write(file_data)

                for sim_rpt in range(n_runs):
                    # Run Kappa
                    out_filename = out_filename_pattern.format(
                        pset_idx, sim_rpt)
                    args = [kasim_path] + base_args + [
                        '-i', kappa_filename, '-o', out_filename
                    ]

                    # Run KaSim
                    self._logger.debug('Running: {}'.format(' '.join(args)))
                    p = subprocess.Popen(args,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         cwd=base_directory)

                    for line in p.stdout:
                        self._logger.debug('@@' + line.decode('utf8')[:-1])
                    (p_out, p_err) = p.communicate()

                    if p.returncode:
                        raise KasimInterfaceError(
                            p_out.decode('utf8') + '\n' + p_err.decode('utf8'))

                    # The simulation data, as a numpy array
                    data = _parse_kasim_outfile(out_filename)
                    # Sanity check that observables are in correct order
                    assert data.dtype.names == kappa_col_names
                    data = data.view('<f8')
                    # Handle case with single row output
                    if data.ndim == 1:
                        data.shape = (1, data.shape[0])
                    # Parse into format
                    tout.append(data[:, 0])
                    observable_traj.append(data[:, 1:])
        finally:
            if cleanup:
                shutil.rmtree(base_directory)

        return SimulationResult(self,
                                tout=tout,
                                observables_and_expressions=observable_traj,
                                simulations_per_param_set=n_runs)
Code Example #14
def run_simulation(model,
                   time=10000,
                   points=200,
                   cleanup=True,
                   output_prefix=None,
                   output_dir=None,
                   flux_map=False,
                   perturbation=None,
                   seed=None,
                   verbose=False):
    """Runs the given model using KaSim and returns the parsed results.

    Parameters
    ----------
    model : pysb.core.Model
        The model to simulate/analyze using KaSim.
    time : number
        The amount of time (in arbitrary units) to run a simulation.
        Identical to the -u time -l argument when using KaSim at the command
        line.
        Default value is 10000. If set to 0, no simulation will be run.
    points : integer
        The number of data points to collect for plotting.
        Note that this is not identical to the -p argument of KaSim when
        called from the command line, which denotes plot period (time interval
        between points in plot).
        Default value is 200. Note that the number of points actually returned
        by the simulator will be points + 1 (including the 0 point).
    cleanup : boolean
        Specifies whether output files produced by KaSim should be deleted
        after execution is completed. Default value is True.
    output_prefix: str
        Prefix of the temporary directory name. Default is
        'tmpKappa_<model name>_'.
    output_dir : string
        The directory in which to create the temporary directory for
        the .ka and other output files. Defaults to the system temporary file
        directory (e.g. /tmp). If the specified directory does not exist,
        an Exception is thrown.
    flux_map: boolean
        Specifies whether or not to produce the flux map (generated over the
        full duration of the simulation). Default value is False.
    perturbation : string or None
        Optional perturbation language syntax to be appended to the Kappa file.
        See KaSim manual for more details. Default value is None (no
        perturbation).
    seed : integer
        A seed integer for KaSim random number generator. Set to None to
        allow KaSim to use a random seed (default) or supply a seed for
        deterministic behaviour (e.g. for testing)
    verbose : boolean
        Whether to pass the output of KaSim through to stdout/stderr.

    Returns
    -------
    If flux_map is False, returns the kasim simulation data as a Numpy ndarray.
    Data is accessed using the syntax::

            results[index_name]

    The index 'time' gives the time coordinates of the simulation. Data for the
    observables can be accessed by indexing the array with the names of the
    observables. Each entry in the ndarray has length points + 1, due to the
    inclusion of both the zero point and the final timepoint.

    If flux_map is True, returns an instance of SimulationResult, a namedtuple
    with two members, `timecourse` and `flux_map`. The `timecourse` field
    contains the simulation ndarray, and the `flux_map` field is an instance of
    a pygraphviz AGraph containing the flux map. The flux map can be rendered
    as a pdf using the dot layout program as follows::

        fluxmap.draw('fluxmap.pdf', prog='dot')
    """

    gen = KappaGenerator(model)

    if output_prefix is None:
        output_prefix = 'tmpKappa_%s_' % model.name

    base_directory = tempfile.mkdtemp(prefix=output_prefix, dir=output_dir)

    base_filename = os.path.join(base_directory, model.name)
    kappa_filename = base_filename + '.ka'
    fm_filename = base_filename + '_fm.dot'
    out_filename = base_filename + '.out'

    if points == 0:
        raise ValueError('The number of data points cannot be zero.')
    plot_period = (float(time) / points) if time > 0 else 1.0

    args = [
        '-i', kappa_filename, '-u', 'time', '-l',
        str(time), '-p',
        '%.5f' % plot_period, '-o', out_filename
    ]

    if seed:
        args.extend(['-seed', str(seed)])

    # Generate the Kappa model code from the PySB model and write it to
    # the Kappa file:
    with open(kappa_filename, 'w') as kappa_file:
        kappa_file.write(gen.get_content())
        # If desired, add instructions to the kappa file to generate the
        # flux map:
        if flux_map:
            kappa_file.write('%%mod: [true] do $FLUX "%s" [true]\n' %
                             fm_filename)
        # If any perturbation language code has been passed in, add it to
        # the Kappa file:
        if perturbation:
            kappa_file.write('\n%s\n' % perturbation)

    # Run KaSim
    kasim_path = pf.get_path('kasim')
    p = subprocess.Popen([kasim_path] + args,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         cwd=base_directory)
    if verbose:
        for line in iter(p.stdout.readline, b''):
            print('@@', line, end='')
    (p_out, p_err) = p.communicate()

    if p.returncode:
        raise KasimInterfaceError(p_out + '\n' + p_err)

    # The simulation data, as a numpy array
    data = _parse_kasim_outfile(out_filename)

    if flux_map:
        try:
            import pygraphviz
            flux_graph = pygraphviz.AGraph(fm_filename)
        except ImportError:
            if cleanup:
                raise RuntimeError("Couldn't import pygraphviz, which is "
                                   "required to return the flux map as a "
                                   "pygraphviz AGraph object. Either install "
                                   "pygraphviz or set cleanup=False to retain "
                                   "dot files.")
            else:
                warnings.warn("pygraphviz could not be imported so no AGraph "
                              "object returned (returning None); flux map "
                              "dot file available at %s" % fm_filename)
                flux_graph = None

    if cleanup:
        shutil.rmtree(base_directory)

    # If a flux map was generated, return both the simulation output and the
    # flux map as a pygraphviz graph
    if flux_map:
        return SimulationResult(data, flux_graph)
    # If no flux map was requested, return only the simulation data
    else:
        return data
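A hedged usage sketch for `run_simulation` (defined in `pysb.kappa`), assuming KaSim is on PySB's search path; result indexing follows the docstring above:

from pysb.examples.michment import model
from pysb.kappa import run_simulation

res = run_simulation(model, time=100, points=100, seed=1)
print(res['time'][-1])  # observables are likewise indexed by name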
Code Example #15
def run_static_analysis(model,
                        influence_map=False,
                        contact_map=False,
                        cleanup=True,
                        output_prefix=None,
                        output_dir=None,
                        verbose=False):
    """Run static analysis (KaSa) on to get the contact and influence maps.

    If neither influence_map nor contact_map are set to True, then a ValueError
    is raised.

    Parameters
    ----------
    model : pysb.core.Model
        The model to simulate/analyze using KaSa.
    influence_map : boolean
        Whether to compute the influence map.
    contact_map : boolean
        Whether to compute the contact map.
    cleanup : boolean
        Specifies whether output files produced by KaSa should be deleted
        after execution is completed. Default value is True.
    output_prefix: str
        Prefix of the temporary directory name. Default is
        'tmpKappa_<model name>_'.
    output_dir : string
        The directory in which to create the temporary directory for
        the .ka and other output files. Defaults to the system temporary file
        directory (e.g. /tmp). If the specified directory does not exist,
        an Exception is thrown.
    verbose : boolean
        Whether to pass the output of KaSa through to stdout/stderr.

    Returns
    -------
    StaticAnalysisResult, a namedtuple with two fields, `contact_map` and
    `influence_map`, each containing the respective result as an instance
    of a pygraphviz AGraph. If either the contact_map or influence_map
    argument to the function is False, the corresponding entry in the
    StaticAnalysisResult returned by the function will be None.
    """

    # Make sure the user has asked for an output!
    if not influence_map and not contact_map:
        raise ValueError('Either contact_map or influence_map (or both) must '
                         'be set to True in order to perform static analysis.')

    gen = KappaGenerator(model, _warn_no_ic=False)

    if output_prefix is None:
        output_prefix = 'tmpKappa_%s_' % model.name

    base_directory = tempfile.mkdtemp(prefix=output_prefix, dir=output_dir)

    base_filename = os.path.join(base_directory, str(model.name))
    kappa_filename = base_filename + '.ka'
    im_filename = base_filename + '_im.dot'
    cm_filename = base_filename + '_cm.dot'

    # NOTE: in the args passed to KaSa, the directory for the .dot files is
    # specified by the --output_directory option, and the output_contact_map
    # and output_influence_map should only be the base filenames (without
    # a directory prefix).
    # Contact map args:
    if contact_map:
        cm_args = [
            '--compute-contact-map', '--output-contact-map',
            os.path.basename(cm_filename), '--output-contact-map-directory',
            base_directory
        ]
    else:
        cm_args = ['--no-compute-contact-map']
    # Influence map args:
    if influence_map:
        im_args = [
            '--compute-influence-map', '--output-influence-map',
            os.path.basename(im_filename), '--output-influence-map-directory',
            base_directory
        ]
    else:
        im_args = ['--no-compute-influence-map']
    # Full arg list
    args = [kappa_filename] + cm_args + im_args

    # Generate the Kappa model code from the PySB model and write it to
    # the Kappa file:
    with open(kappa_filename, 'w') as kappa_file:
        kappa_file.write(gen.get_content())

    # Run KaSa using the given args
    kasa_path = pf.get_path('kasa')
    p = subprocess.Popen([kasa_path] + args,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         cwd=base_directory)
    if verbose:
        for line in iter(p.stdout.readline, b''):
            print('@@', line, end='')
    (p_out, p_err) = p.communicate()

    if p.returncode:
        raise KasaInterfaceError(p_out + '\n' + p_err)

    # Try to create the graphviz objects from the .dot files created
    try:
        import pygraphviz
        # Convert the contact map to a Graph
        cmap = pygraphviz.AGraph(cm_filename) if contact_map else None
        imap = pygraphviz.AGraph(im_filename) if influence_map else None
    except ImportError:
        if cleanup:
            raise RuntimeError(
                "Couldn't import pygraphviz, which is "
                "required to return the influence and contact maps "
                " as pygraphviz AGraph objects. Either install "
                "pygraphviz or set cleanup=False to retain "
                "dot files.")
        else:
            warnings.warn("pygraphviz could not be imported so no AGraph "
                          "objects returned (returning None); "
                          "contact/influence maps available at %s" %
                          base_directory)
            cmap = None
            imap = None

    # Clean up the temp directory if desired
    if cleanup:
        shutil.rmtree(base_directory)

    return StaticAnalysisResult(cmap, imap)
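A hedged usage sketch for `run_static_analysis` (defined in `pysb.kappa`), assuming KaSa is on PySB's search path:

from pysb.examples.michment import model
from pysb.kappa import run_static_analysis

res = run_static_analysis(model, contact_map=True, influence_map=True)
cmap, imap = res.contact_map, res.influence_map  # pygraphviz AGraphs (or None)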
Code Example #16
File: kappa.py Project: zhwycsz/pysb
def run_static_analysis(model,
                        influence_map=False,
                        contact_map=False,
                        cleanup=True,
                        output_prefix=None,
                        output_dir=None,
                        verbose=False):
    """Run static analysis (KaSa) on to get the contact and influence maps.

    If neither influence_map nor contact_map are set to True, then a ValueError
    is raised.

    Parameters
    ----------
    model : pysb.core.Model
        The model to simulate/analyze using KaSa.
    influence_map : boolean
        Whether to compute the influence map.
    contact_map : boolean
        Whether to compute the contact map.
    cleanup : boolean
        Specifies whether output files produced by KaSa should be deleted
        after execution is completed. Default value is True.
    output_prefix: str
        Prefix of the temporary directory name. Default is
        'tmpKappa_<model name>_'.
    output_dir : string
        The directory in which to create the temporary directory for
        the .ka and other output files. Defaults to the system temporary file
        directory (e.g. /tmp). If the specified directory does not exist,
        an Exception is thrown.
    verbose : boolean
        Whether to pass the output of KaSa through to stdout/stderr.

    Returns
    -------
    StaticAnalysisResult, a namedtuple with two fields, `contact_map` and
    `influence_map`, each containing the respective result as an instance
    of a networkx MultiGraph. If either the contact_map or influence_map
    argument to the function is False, the corresponding entry in the
    StaticAnalysisResult returned by the function will be None.

    Notes
    -----
    To view a networkx graph graphically, use `draw_networkx`::

        import networkx as nx
        nx.draw_networkx(g, with_labels=True)

    You can use `graphviz_layout` to have graphviz lay out the graph (requires
    the pydot library)::

        import networkx as nx
        pos = nx.drawing.nx_pydot.graphviz_layout(g, prog='dot')
        nx.draw_networkx(g, pos, with_labels=True)

    For further information, see the networkx documentation on visualization:
    https://networkx.github.io/documentation/latest/reference/drawing.html
    """

    # Make sure the user has asked for an output!
    if not influence_map and not contact_map:
        raise ValueError('Either contact_map or influence_map (or both) must '
                         'be set to True in order to perform static analysis.')

    gen = KappaGenerator(model, _warn_no_ic=False)

    if output_prefix is None:
        output_prefix = 'tmpKappa_%s_' % model.name

    base_directory = tempfile.mkdtemp(prefix=output_prefix, dir=output_dir)

    base_filename = os.path.join(base_directory, str(model.name))
    kappa_filename = base_filename + '.ka'
    im_filename = base_filename + '_im.dot'
    cm_filename = base_filename + '_cm.dot'

    # NOTE: in the args passed to KaSa, the directory for the .dot files is
    # specified by the --output_directory option, and the output_contact_map
    # and output_influence_map should only be the base filenames (without
    # a directory prefix).
    # Contact map args:
    if contact_map:
        cm_args = [
            '--compute-contact-map', '--output-contact-map',
            os.path.basename(cm_filename), '--output-contact-map-directory',
            base_directory
        ]
    else:
        cm_args = ['--no-compute-contact-map']
    # Influence map args:
    if influence_map:
        im_args = [
            '--compute-influence-map', '--output-influence-map',
            os.path.basename(im_filename), '--output-influence-map-directory',
            base_directory
        ]
    else:
        im_args = ['--no-compute-influence-map']
    # Full arg list
    args = [kappa_filename] + cm_args + im_args

    # Generate the Kappa model code from the PySB model and write it to
    # the Kappa file:
    with open(kappa_filename, 'w') as kappa_file:
        file_data = gen.get_content()
        logger.debug('Kappa file contents:\n\n' + file_data)
        kappa_file.write(file_data)

    # Run KaSa using the given args
    kasa_path = pf.get_path('kasa')
    p = subprocess.Popen([kasa_path] + args,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         cwd=base_directory)
    if verbose:
        for line in iter(p.stdout.readline, b''):
            print('@@', line, end='')
    (p_out, p_err) = p.communicate()

    if p.returncode:
        raise KasaInterfaceError(
            p_out.decode('utf8') + '\n' + p_err.decode('utf8'))

    # Try to create the graphviz objects from the .dot files created
    try:
        # Convert the contact map to a Graph
        cmap = read_dot(cm_filename) if contact_map else None
        imap = read_dot(im_filename) if influence_map else None
    except ImportError:
        if cleanup:
            raise
        else:
            warnings.warn("The pydot library could not be "
                          "imported, so no MultiGraph "
                          "object returned (returning None); "
                          "contact/influence maps available at %s" %
                          base_directory)
            cmap = None
            imap = None

    # Clean up the temp directory if desired
    if cleanup:
        shutil.rmtree(base_directory)

    return StaticAnalysisResult(cmap, imap)
Code Example #17
File: kappa.py Project: LoLab-VU/pysb
def run_simulation(model, time=10000, points=200, cleanup=True,
                   output_prefix=None, output_dir=None, flux_map=False,
                   perturbation=None, seed=None, verbose=False):
    """Runs the given model using KaSim and returns the parsed results.

    Parameters
    ----------
    model : pysb.core.Model
        The model to simulate/analyze using KaSim.
    time : number
        The amount of time (in arbitrary units) to run a simulation.
        Identical to the -u time -l argument when using KaSim at the command
        line.
        Default value is 10000. If set to 0, no simulation will be run.
    points : integer
        The number of data points to collect for plotting.
        Note that this is not identical to the -p argument of KaSim when
        called from the command line, which denotes plot period (time interval
        between points in plot).
        Default value is 200. Note that the number of points actually returned
        by the simulator will be points + 1 (including the 0 point).
    cleanup : boolean
        Specifies whether output files produced by KaSim should be deleted
        after execution is completed. Default value is True.
    output_prefix: str
        Prefix of the temporary directory name. Default is
        'tmpKappa_<model name>_'.
    output_dir : string
        The directory in which to create the temporary directory for
        the .ka and other output files. Defaults to the system temporary file
        directory (e.g. /tmp). If the specified directory does not exist,
        an Exception is thrown.
    flux_map: boolean
        Specifies whether or not to produce the flux map (generated over the
        full duration of the simulation). Default value is False.
    perturbation : string or None
        Optional perturbation language syntax to be appended to the Kappa file.
        See KaSim manual for more details. Default value is None (no
        perturbation).
    seed : integer
        A seed integer for KaSim random number generator. Set to None to
        allow KaSim to use a random seed (default) or supply a seed for
        deterministic behaviour (e.g. for testing)
    verbose : boolean
        Whether to pass the output of KaSim through to stdout/stderr.

    Returns
    -------
    If flux_map is False, returns the kasim simulation data as a Numpy ndarray.
    Data is accessed using the syntax::

            results[index_name]

    The index 'time' gives the time coordinates of the simulation. Data for the
    observables can be accessed by indexing the array with the names of the
    observables. Each entry in the ndarray has length points + 1, due to the
    inclusion of both the zero point and the final timepoint.

    If flux_map is True, returns an instance of SimulationResult, a namedtuple
    with two members, `timecourse` and `flux_map`. The `timecourse` field
    contains the simulation ndarray, and the `flux_map` field is an instance of
    a networkx MultiGraph containing the flux map. For details on viewing
    the flux map graphically see :func:`run_static_analysis` (notes section).
    """

    gen = KappaGenerator(model)

    if output_prefix is None:
        output_prefix = 'tmpKappa_%s_' % model.name

    base_directory = tempfile.mkdtemp(prefix=output_prefix, dir=output_dir)

    base_filename = os.path.join(base_directory, model.name)
    kappa_filename = base_filename + '.ka'
    fm_filename = base_filename + '_fm.dot'
    out_filename = base_filename + '.out'

    if points == 0:
        raise ValueError('The number of data points cannot be zero.')
    plot_period = (float(time) / points) if time > 0 else 1.0

    args = ['-i', kappa_filename, '-u', 'time', '-l', str(time),
            '-p', '%.5f' % plot_period, '-o', out_filename]

    if seed:
        args.extend(['-seed', str(seed)])

    # Generate the Kappa model code from the PySB model and write it to
    # the Kappa file:
    with open(kappa_filename, 'w') as kappa_file:
        file_data = gen.get_content()
        # If desired, add instructions to the kappa file to generate the
        # flux map:
        if flux_map:
            file_data += '%%mod: [true] do $DIN "%s" [true];\n' % fm_filename

        # If any perturbation language code has been passed in, add it to
        # the Kappa file:
        if perturbation:
            file_data += '\n%s\n' % perturbation

        logger.debug('Kappa file contents:\n\n' + file_data)
        kappa_file.write(file_data)

    # Run KaSim
    kasim_path = pf.get_path('kasim')
    p = subprocess.Popen([kasim_path] + args,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         cwd=base_directory)
    if verbose:
        for line in iter(p.stdout.readline, b''):
            print('@@', line, end='')
    (p_out, p_err) = p.communicate()

    if p.returncode:
        raise KasimInterfaceError(
            p_out.decode('utf8') + '\n' + p_err.decode('utf8'))

    # The simulation data, as a numpy array
    data = _parse_kasim_outfile(out_filename)

    if flux_map:
        try:
            flux_graph = read_dot(fm_filename)
        except ImportError:
            if cleanup:
                raise
            else:
                warnings.warn(
                        "The pydot library could not be "
                        "imported, so no MultiGraph "
                        "object returned (returning None); flux map "
                        "dot file available at %s" % fm_filename)
                flux_graph = None

    if cleanup:
        shutil.rmtree(base_directory)

    # If a flux map was generated, return both the simulation output and the
    # flux map as a networkx multigraph
    if flux_map:
        return SimulationResult(data, flux_graph)
    # If no flux map was requested, return only the simulation data
    else:
        return data
Code Example #18
    def execute(self, reload_netfile=False, skip_file_actions=True):
        """
        Executes all BNG commands in the command queue.

        Parameters
        ----------
        reload_netfile: bool or str
            If true, attempts to reload an existing .net file from a
            previous execute() iteration. If a string, the filename
            specified in the string is supplied to BNG's readFile (which can be
            any file type BNG supports, such as .net or .bngl).
            This is useful for running multiple actions in a row,
            where results need to be read into PySB before a new series of
            actions is executed.
        skip_file_actions: bool
            Only used if `reload_netfile` is not False. Set to True to
            ignore any actions block in the loaded file.
        """
        self.command_queue.write('end actions\n')
        bng_commands = self.command_queue.getvalue()

        # Generate BNGL file
        with open(self.bng_filename, 'w') as bng_file:
            output = ''
            if self.model and not reload_netfile:
                output += self.generator.get_content()
            if reload_netfile:
                filename = reload_netfile if \
                    isinstance(reload_netfile, basestring) \
                    else self.net_filename
                output += '\n  readFile({file=>"%s",skip_actions=>%d})\n' \
                    % (filename, int(skip_file_actions))
            output += bng_commands
            bng_file.write(output)
            lines = output.split('\n')
            line_number_format = 'L{{:0{}d}}  {{}}'.format(
                int(numpy.ceil(numpy.log10(len(lines)))))
            output = '\n'.join(
                line_number_format.format(ln + 1, line)
                for ln, line in enumerate(lines))
            self._logger.debug('BNG command file contents:\n' + output)

        # Reset the command queue, in case execute() is called again
        self.command_queue.close()
        self._init_command_queue()

        bng_exec_args = [pf.get_path('bng'), self.bng_filename]
        if not bng_exec_args[0].endswith('.bat'):
            bng_exec_args.insert(0, 'perl')

        p = subprocess.Popen(bng_exec_args,
                             cwd=self.base_directory,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # output lines as DEBUG, unless a warning or error is encountered
        capture_error = False
        captured_error_lines = []
        for line in iter(p.stdout.readline, b''):
            line = line[:-1].decode('utf-8')
            if line.startswith('ERROR:'):
                capture_error = True
            if capture_error:
                captured_error_lines.append(line)

            self._logger.debug(line)

        # p_out is already consumed, so only get p_err
        (_, p_err) = p.communicate()
        p_err = p_err.decode('utf-8')
        if p.returncode or captured_error_lines:
            raise BngInterfaceError('\n'.join(captured_error_lines) + "\n" +
                                    p_err.rstrip())
Code Example #19
File: stochkit.py Project: alubbock/pysb
    def _run_stochkit(self, t=20, t_length=100, number_of_trajectories=1,
                      seed=None, algorithm='ssa', method=None,
                      num_processors=1, stats=False, epsilon=None,
                      threshold=None):

        extra_args = '-p {:d}'.format(num_processors)

        # Random seed for stochastic simulation
        if seed is not None:
            extra_args += ' --seed {:d}'.format(seed)

        # Keep all the trajectories by default
        extra_args += ' --keep-trajectories'

        # Number of trajectories
        extra_args += ' --realizations {:d}'.format(number_of_trajectories)

        # We generally don't need the extra stats
        if not stats:
            extra_args += ' --no-stats'

        if method is not None:  # This only works for StochKit 2.1
            extra_args += ' --method {}'.format(method)

        if epsilon is not None:
            extra_args += ' --epsilon {:f}'.format(epsilon)

        if threshold is not None:
            extra_args += ' --threshold {:d}'.format(threshold)

        # Find binary for selected algorithm (SSA, Tau-leaping, ...)
        if algorithm not in ['ssa', 'tau_leaping']:
            raise SimulatorException(
                "algorithm must be 'ssa' or 'tau_leaping'")

        executable = get_path('stochkit_{}'.format(algorithm))

        # Output model file to directory
        fname = os.path.join(self._outdir, 'pysb.xml')

        trajectories = []
        for i in range(len(self.initials)):
            # We write all StochKit output files to a temporary folder
            prefix_outdir = os.path.join(self._outdir, 'output_{}'.format(i))

            # Export model file
            stoch_xml = StochKitExporter(self._model).export(
                self.initials[i], self.param_values[i])
            self._logger.log(EXTENDED_DEBUG, 'StochKit XML:\n%s' % stoch_xml)
            with open(fname, 'wt') as f:
                f.write(stoch_xml)

            # Assemble the argument list
            args = '--model {} --out-dir {} -t {:f} -i {:d}'.format(
                fname, prefix_outdir, t, t_length - 1)

            # If we are using local mode, shell out and run StochKit
            # (SSA or Tau-leaping or ODE)
            cmd = '{} {} {}'.format(executable, args, extra_args)
            self._logger.debug("StochKit run {} of {} (cmd: {})".format(
                (i + 1), len(self.initials), cmd))

            # Execute
            try:
                handle = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE, shell=True)
                return_code = handle.wait()
            except OSError as e:
                raise SimulatorException("StochKit execution failed: \
                {0}\n{1}".format(cmd, e))

            try:
                stderr = handle.stderr.read()
            except Exception as e:
                stderr = 'Error reading stderr: {0}'.format(e)
            try:
                stdout = handle.stdout.read()
            except Exception as e:
                stdout = 'Error reading stdout: {0}'.format(e)

            if return_code != 0:
                raise SimulatorException("Solver execution failed: \
                '{0}' output:\nSTDOUT:\n{1}\nSTDERR:\n{2}".format(
                    cmd, stdout, stderr))

            traj_dir = os.path.join(prefix_outdir, 'trajectories')
            try:
                trajectories.extend([np.loadtxt(os.path.join(
                    traj_dir, f)) for f in sorted(os.listdir(traj_dir))])
            except Exception as e:
                raise SimulatorException(
                    "Error reading StochKit trajectories: {0}"
                    "\nSTDOUT:{1}\nSTDERR:{2}".format(e, stdout, stderr))

            if len(trajectories) == 0 or len(stderr) != 0:
                raise SimulatorException("Solver execution failed: \
                '{0}' output:\nSTDOUT:\n{1}\nSTDERR:\n{2}".format(
                    cmd, stdout, stderr))

            self._logger.debug("StochKit STDOUT:\n{0}".format(stdout))

        # Return data
        return trajectories
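_run_stochkit assembles its flags into one long string and launches with shell=True. The same flags can be collected as an argument list instead, which lets subprocess handle quoting and avoids shell-escaping issues; a hedged sketch reusing only the flag names that appear above (build_stochkit_args is a hypothetical helper, not part of PySB):

def build_stochkit_args(model_file, out_dir, t, t_length,
                        realizations=1, seed=None, stats=False):
    """Mirror _run_stochkit's flag assembly as an argument list."""
    args = ['--model', model_file, '--out-dir', out_dir,
            '-t', str(t), '-i', str(t_length - 1),
            '--keep-trajectories', '--realizations', str(realizations)]
    if seed is not None:
        args += ['--seed', str(seed)]
    if not stats:
        args.append('--no-stats')
    return args

# e.g. subprocess.Popen([executable] + build_stochkit_args(...))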
Code example #20
File: bng.py  Project: LoLab-VU/pysb
    def execute(self, reload_netfile=False, skip_file_actions=True):
        """
        Executes all BNG commands in the command queue.

        Parameters
        ----------
        reload_netfile: bool or str
            If true, attempts to reload an existing .net file from a
            previous execute() iteration. If a string, the filename
            specified in the string is supplied to BNG's readFile (which can be
            any file type BNG supports, such as .net or .bngl).
            This is useful for running multiple actions in a row,
            where results need to be read into PySB before a new series of
            actions is executed.
        skip_file_actions: bool
            Only used if the previous argument is not False. Set this
            argument to True to ignore any actions block in the loaded file.
        """
        self.command_queue.write('end actions\n')
        bng_commands = self.command_queue.getvalue()

        # Generate BNGL file
        with open(self.bng_filename, 'w') as bng_file:
            output = ''
            if self.model and not reload_netfile:
                output += self.generator.get_content()
            if reload_netfile:
                filename = reload_netfile if \
                    isinstance(reload_netfile, basestring) \
                    else self.net_filename
                output += '\n  readFile({file=>"%s",skip_actions=>%d})\n' \
                    % (filename, int(skip_file_actions))
            output += bng_commands
            bng_file.write(output)
            lines = output.split('\n')
            line_number_format = 'L{{:0{}d}}  {{}}'.format(
                int(numpy.ceil(numpy.log10(len(lines)))))
            output = '\n'.join(
                line_number_format.format(ln + 1, line)
                for ln, line in enumerate(lines))
            self._logger.debug('BNG command file contents:\n' + output)

        # Reset the command queue, in case execute() is called again
        self.command_queue.close()
        self._init_command_queue()

        bng_exec_args = [pf.get_path('bng'), self.bng_filename]
        if not bng_exec_args[0].endswith('.bat'):
            bng_exec_args.insert(0, 'perl')

        p = subprocess.Popen(bng_exec_args,
                             cwd=self.base_directory,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # output lines as DEBUG, unless a warning or error is encountered
        capture_error = False
        captured_error_lines = []
        for line in iter(p.stdout.readline, b''):
            line = line[:-1].decode('utf-8')
            if line.startswith('ERROR:'):
                capture_error = True
            if capture_error:
                captured_error_lines.append(line)

            self._logger.debug(line)

        # p_out is already consumed, so only get p_err
        (_, p_err) = p.communicate()
        p_err = p_err.decode('utf-8')
        if p.returncode or captured_error_lines:
            raise BngInterfaceError('\n'.join(captured_error_lines) + "\n" +
                                    p_err.rstrip())
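The numbered debug dump in execute() derives a zero-padded width from the line count so the 'L' markers stay aligned. The same logic isolated as a helper (a sketch: math replaces numpy since nothing array-valued is involved, and the '+ 1' guards the exact-power-of-ten case where the original's width comes up one digit short):

import math

def number_lines(text):
    """Prefix each line with a zero-padded 'L<n>' marker for debug dumps."""
    lines = text.split('\n')
    width = int(math.ceil(math.log10(len(lines) + 1)))
    fmt = 'L{{:0{}d}}  {{}}'.format(width)
    return '\n'.join(fmt.format(n + 1, line)
                     for n, line in enumerate(lines))

print(number_lines('begin model\nend model'))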
Code example #21
def test_get_set_path():
    bng_path = get_path('bng')
    assert os.path.exists(bng_path)
    set_path('bng', bng_path)
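get_path and set_path manage PySB's registry of external tool locations; set_path overrides the automatic search, which is useful when BioNetGen lives outside the standard install locations. A short hedged sketch (the install path below is hypothetical):

from pysb.pathfinder import get_path, set_path

# Point PySB at a manually installed BioNetGen (hypothetical location)
set_path('bng', '/opt/BioNetGen-2.5.2/BNG2.pl')
print(get_path('bng'))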
Code example #22
File: cupsoda.py  Project: alubbock/pysb
    def run(self, tspan=None, initials=None, param_values=None):
        """Perform a set of integrations.

        Returns a :class:`.SimulationResult` object.

        Parameters
        ----------
        tspan : list-like, optional
            Time values at which the integrations are sampled. The first and
            last values define the time range.
        initials : list-like, optional
            Initial species concentrations for all simulations. Dimensions are
            number of simulations x number of species.
        param_values : list-like, optional
            Parameters for all simulations. Dimensions are number of
            simulations x number of parameters.

        Returns
        -------
        A :class:`SimulationResult` object

        Notes
        -----
        1. An exception is thrown if `tspan` is not defined in either
           `__init__` or `run`.

        2. If neither `initials` nor `param_values` is defined in either
           `__init__` or `run`, a single simulation is run with the initial
           concentrations and parameter values defined in the model.

        """
        super(CupSodaSimulator, self).run(tspan=tspan, initials=initials,
                                          param_values=param_values,
                                          _run_kwargs=[])

        # Create directories for cupSODA input and output files
        self.outdir = tempfile.mkdtemp(prefix=self._prefix + '_',
                                       dir=self._base_dir)

        self._logger.debug("Output directory is %s" % self.outdir)
        _cupsoda_infiles_dir = os.path.join(self.outdir, "INPUT")
        os.mkdir(_cupsoda_infiles_dir)
        self._cupsoda_outfiles_dir = os.path.join(self.outdir, "OUTPUT")

        # Path to cupSODA executable
        bin_path = get_path('cupsoda')

        # Create cupSODA input files
        self._create_input_files(_cupsoda_infiles_dir)

        # Build command
        # ./cupSODA input_model_folder blocks output_folder
        #   simulation_file_prefix gpu_number fitness_calculation
        #   memory_use dump
        command = [bin_path, _cupsoda_infiles_dir, str(self.n_blocks),
                   self._cupsoda_outfiles_dir, self._prefix, str(self.gpu),
                   '0', self._memory_usage, str(self._cupsoda_verbose)]

        self._logger.info("Running cupSODA: " + ' '.join(command))
        start_time = time.time()
        # Run simulation and return trajectories
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # for line in iter(p.stdout.readline, b''):
        #     if 'Running time' in line:
        #         self._logger.info(line[:-1])
        (p_out, p_err) = p.communicate()
        p_out = p_out.decode('utf-8')
        p_err = p_err.decode('utf-8')
        logger_level = self._logger.logger.getEffectiveLevel()
        if logger_level <= logging.INFO:
            run_time_match = self._running_time_regex.search(p_out)
            if run_time_match:
                self._logger.info('cupSODA reported time: {} '
                                  'seconds'.format(run_time_match.group(1)))
        self._logger.debug('cupSODA stdout:\n' + p_out)
        if p_err:
            self._logger.error('cupSODA stderr:\n' + p_err)
        if p.returncode:
            raise SimulatorException(
                p_out.rstrip("at line") + "\n" + p_err.rstrip())
        tout, trajectories = self._load_trajectories(
            self._cupsoda_outfiles_dir)
        if self._cleanup:
            shutil.rmtree(self.outdir)
        end_time = time.time()
        self._logger.info("cupSODA + I/O time: {} seconds".format(end_time -
                                                                  start_time))
        return SimulationResult(self, tout, trajectories)
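A typical driver for the run() method above, sketched with one of PySB's bundled example models (actually running it requires an NVIDIA GPU and the cupSODA binary on PySB's search path):

import numpy as np
from pysb.examples.robertson import model
from pysb.simulator.cupsoda import CupSodaSimulator

tspan = np.linspace(0, 50, 101)
sim = CupSodaSimulator(model, tspan=tspan)
res = sim.run()  # single simulation with the model's own initials/parameters
print(res.observables)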
Code example #23
File: sbml.py  Project: shf43/pysb
def sbml_translator(input_file,
                    output_file=None,
                    convention_file=None,
                    naming_conventions=None,
                    user_structures=None,
                    molecule_id=False,
                    atomize=False,
                    pathway_commons=False,
                    verbose=False):
    """
    Run the BioNetGen sbmlTranslator binary to convert SBML to BNGL

    This function runs the external program sbmlTranslator, included with
    BioNetGen, which converts SBML files to BioNetGen language (BNGL).

    Generally, PySB users don't need to run this function directly; an SBML
    model can be imported to PySB in a single step with
    :func:`model_from_sbml`. However, users may wish to note the parameters
    for this function, which alter the way the SBML file is processed. These
    parameters can be supplied as ``**kwargs`` to :func:`model_from_sbml`.

    For more detailed descriptions of the arguments, see the `sbmlTranslator
    documentation <http://bionetgen.org/index.php/SBML2BNGL>`_.

    Parameters
    ----------
    input_file : string
        SBML input filename
    output_file : string, optional
        BNGL output filename
    convention_file : string, optional
        Conventions filename
    naming_conventions : string, optional
        Naming conventions filename
    user_structures : string, optional
        User structures filename
    molecule_id : bool, optional
        Use SBML molecule IDs (True) or names (False).
        IDs are less descriptive but more BNGL friendly. Use only if the
        generated BNGL has syntactic errors
    atomize : bool, optional
        Atomize the model, i.e. attempt to infer molecular structure and
        build rules from the model (True) or just perform a flat import (False)
    pathway_commons : bool, optional
        Use pathway commons to infer molecule binding. This
        setting requires an internet connection and will query the pathway
        commons web service.
    verbose : bool or int, optional (default: False)
        Sets the verbosity level of the logger. See the logging levels and
        constants from Python's logging module for interpretation of integer
        values. False leaves the logging verbosity unchanged, True is equal
        to DEBUG.

    Returns
    -------
    string
        BNGL output filename
    """
    logger = get_logger(__name__, log_level=verbose)
    sbmltrans_bin = os.path.join(os.path.dirname(pf.get_path('bng')),
                                 'bin/sbmlTranslator')
    sbmltrans_args = [sbmltrans_bin, '-i', input_file]
    if output_file is None:
        output_file = os.path.splitext(input_file)[0] + '.bngl'
    sbmltrans_args.extend(['-o', output_file])

    if convention_file:
        sbmltrans_args.extend(['-c', convention_file])

    if naming_conventions:
        sbmltrans_args.extend(['-n', naming_conventions])

    if user_structures:
        sbmltrans_args.extend(['-u', user_structures])

    if molecule_id:
        sbmltrans_args.append('-id')

    if atomize:
        sbmltrans_args.append('-a')

    if pathway_commons:
        sbmltrans_args.append('-p')

    logger.debug("sbmlTranslator command: " + " ".join(sbmltrans_args))

    p = subprocess.Popen(sbmltrans_args,
                         cwd=os.getcwd(),
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

    if logger.getEffectiveLevel() <= EXTENDED_DEBUG:
        output = "\n".join([line for line in iter(p.stdout.readline, b'')])
        if output:
            logger.log(EXTENDED_DEBUG, "sbmlTranslator output:\n\n" + output)
    (p_out, p_err) = p.communicate()
    if p.returncode:
        raise SbmlTranslationError(p_out.decode('utf-8') + "\n" +
                                   p_err.decode('utf-8'))

    return output_file
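As the docstring notes, most users reach sbmlTranslator through model_from_sbml, which forwards these options as keyword arguments; a minimal sketch (the SBML filename is a placeholder):

from pysb.importers.sbml import model_from_sbml

# Flat import; pass atomize=True to infer molecular structure instead
model = model_from_sbml('my_model.xml', atomize=False)
print(len(model.rules))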
Code example #24
File: kappa.py  Project: LoLab-VU/pysb
def run_static_analysis(model, influence_map=False, contact_map=False,
                        cleanup=True, output_prefix=None, output_dir=None,
                        verbose=False):
    """Run static analysis (KaSa) on to get the contact and influence maps.

    If neither influence_map nor contact_map are set to True, then a ValueError
    is raised.

    Parameters
    ----------
    model : pysb.core.Model
        The model to simulate/analyze using KaSa.
    influence_map : boolean
        Whether to compute the influence map.
    contact_map : boolean
        Whether to compute the contact map.
    cleanup : boolean
        Specifies whether output files produced by KaSa should be deleted
        after execution is completed. Default value is True.
    output_prefix: str
        Prefix of the temporary directory name. Default is
        'tmpKappa_<model name>_'.
    output_dir : string
        The directory in which to create the temporary directory for
        the .ka and other output files. Defaults to the system temporary file
        directory (e.g. /tmp). If the specified directory does not exist,
        an Exception is thrown.
    verbose : boolean
        Whether to pass the output of KaSa through to stdout/stderr.

    Returns
    -------
    StaticAnalysisResult, a namedtuple with two fields, `contact_map` and
    `influence_map`, each containing the respective result as an instance
    of a networkx MultiGraph. If either the contact_map or influence_map
    argument to the function is False, the corresponding entry in the
    StaticAnalysisResult returned by the function will be None.

    Notes
    -----
    To view a networkx file graphically, use `draw_network`::

        import networkx as nx
        nx.draw_networkx(g, with_labels=True)

    You can use `graphviz_layout` to use graphviz for layout (requires pydot
    library)::

        import networkx as nx
        pos = nx.drawing.nx_pydot.graphviz_layout(g, prog='dot')
        nx.draw_networkx(g, pos, with_labels=True)

    For further information, see the networkx documentation on visualization:
    https://networkx.github.io/documentation/latest/reference/drawing.html
    """

    # Make sure the user has asked for an output!
    if not influence_map and not contact_map:
        raise ValueError('Either contact_map or influence_map (or both) must '
                         'be set to True in order to perform static analysis.')

    gen = KappaGenerator(model, _warn_no_ic=False)

    if output_prefix is None:
        output_prefix = 'tmpKappa_%s_' % model.name

    base_directory = tempfile.mkdtemp(prefix=output_prefix, dir=output_dir)

    base_filename = os.path.join(base_directory, str(model.name))
    kappa_filename = base_filename + '.ka'
    im_filename = base_filename + '_im.dot'
    cm_filename = base_filename + '_cm.dot'

    # NOTE: in the args passed to KaSa, the directory for the .dot files is
    # specified by the --output_directory option, and the output_contact_map
    # and output_influence_map should only be the base filenames (without
    # a directory prefix).
    # Contact map args:
    if contact_map:
        cm_args = ['--compute-contact-map', '--output-contact-map',
                   os.path.basename(cm_filename),
                   '--output-contact-map-directory', base_directory]
    else:
        cm_args = ['--no-compute-contact-map']
    # Influence map args:
    if influence_map:
        im_args = ['--compute-influence-map', '--output-influence-map',
                   os.path.basename(im_filename),
                   '--output-influence-map-directory', base_directory]
    else:
        im_args = ['--no-compute-influence-map']
    # Full arg list
    args = [kappa_filename] + cm_args + im_args

    # Generate the Kappa model code from the PySB model and write it to
    # the Kappa file:
    with open(kappa_filename, 'w') as kappa_file:
        file_data = gen.get_content()
        logger.debug('Kappa file contents:\n\n' + file_data)
        kappa_file.write(file_data)

    # Run KaSa using the given args
    kasa_path = pf.get_path('kasa')
    p = subprocess.Popen([kasa_path] + args,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         cwd=base_directory)
    if verbose:
        for line in iter(p.stdout.readline, b''):
            print('@@', line, end='')
    (p_out, p_err) = p.communicate()

    if p.returncode:
        raise KasaInterfaceError(
            p_out.decode('utf8') + '\n' + p_err.decode('utf8'))

    # Try to create the graphviz objects from the .dot files created
    try:
        # Convert the contact map to a Graph
        cmap = read_dot(cm_filename) if contact_map else None
        imap = read_dot(im_filename) if influence_map else None
    except ImportError:
        if cleanup:
            raise
        else:
            warnings.warn(
                "The pydot library could not be imported, so no MultiGraph "
                "object returned (returning None); contact/influence maps "
                "available at %s" % base_directory)
            cmap = None
            imap = None

    # Clean up the temp directory if desired
    if cleanup:
        shutil.rmtree(base_directory)

    return StaticAnalysisResult(cmap, imap)
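Putting it together, a short driver that runs the contact-map half of the analysis and renders it with the networkx calls from the docstring (kinase_cascade is an arbitrary bundled example model; KaSa must be on PySB's search path):

import matplotlib.pyplot as plt
import networkx as nx
from pysb.examples.kinase_cascade import model
from pysb.kappa import run_static_analysis

res = run_static_analysis(model, contact_map=True)
nx.draw_networkx(res.contact_map, with_labels=True)
plt.show()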