Code example #1
File: test_run_process.py  Project: nsouedet/capsul
    def execution_dummy(self):
        """ Test to execute DummyProcess.
        """
        # Create a process instance
        process = get_process_instance(DummyProcess,
                                       output_directory=self.output_dir)

        # Test the cache mechanism
        for param in [(1., 2.3), (2., 2.), (1., 2.3)]:
            run_process(self.output_dir, process, cachedir=self.cachedir,
                        generate_logging=False, verbose=1,
                        f1=param[0], f2=param[1])
            self.assertEqual(process.res, param[0] * param[1])
            self.assertEqual(process.output_directory, self.output_dir)
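The test above only makes sense with a process whose execution multiplies its two inputs. Below is a minimal sketch of what such a DummyProcess could look like, assuming the capsul Process API and Traits declarations; the actual fixture used by the test suite may be defined differently.

from capsul.api import Process
from traits.api import Directory, Float


class DummyProcess(Process):
    """Hypothetical process: multiplies f1 by f2 and stores the result in res."""

    def __init__(self):
        super(DummyProcess, self).__init__()
        # Two float inputs, one float output and the output directory checked
        # by the test's assertions (names taken from the test above).
        self.add_trait("f1", Float())
        self.add_trait("f2", Float())
        self.add_trait("res", Float(output=True))
        self.add_trait("output_directory", Directory())

    def _run_process(self):
        # run_process() sets f1/f2 from its keyword arguments before calling this
        self.res = self.f1 * self.f2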
Code example #2
File: study_config.py  Project: VincentFrouin/capsul
    def _run(self, process_instance, verbose, **kwargs):
        """ Method to execute a process in a study configuration environment.

        Parameters
        ----------
        process_instance: Process instance (mandatory)
            the process we want to execute
        verbose: int
            if different from zero, print console messages.
        """
        # Message
        logger.info("Study Config: executing process '{0}'...".format(
            process_instance.id))

        # Run
        destination_folder = os.path.join(
            self.output_directory,
            "{0}-{1}".format(self.process_counter, process_instance.name))
        if self.get_trait_value("use_smart_caching") in [None, False]:
            cachedir = None
        else:
            cachedir = self.output_directory
        returncode, log_file = run_process(
            destination_folder,
            process_instance,
            cachedir,
            self.generate_logging,
            **kwargs)

        # Increment the number of executed process count
        self.process_counter += 1
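For orientation, _run() is a private helper; in practice it would be reached through StudyConfig's public run() method. A hypothetical usage sketch, assuming StudyConfig accepts trait values as constructor keywords and reusing the DummyProcess sketch from example #1:

from capsul.api import StudyConfig, get_process_instance

# Assumed setup: a local output directory; use_smart_caching left at its
# default, so cachedir stays None and run_process() does not cache results.
study_config = StudyConfig(output_directory="/tmp/capsul_study")

process = get_process_instance(DummyProcess)
# run() is expected to dispatch to _run(), which forwards f1/f2 to
# run_process() so they are set on the process before execution.
study_config.run(process, verbose=1, f1=1.0, f2=2.3)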
Code example #3
File: study_config.py  Project: andvazva/Parcellation
    def _run(self, process_instance, output_directory, verbose, **kwargs):
        """ Method to execute a process in a study configuration environment.

        Parameters
        ----------
        process_instance: Process instance (mandatory)
            the process we want to execute
        output_directory: Directory name (optional)
            the output directory to use for process execution. This replaces
            self.output_directory but leaves it unchanged.
        verbose: int
            if different from zero, print console messages.
        """
        # Message
        logger.info("Study Config: executing process '{0}'...".format(
            process_instance.id))

        # Run
        if self.get_trait_value("use_smart_caching") in [None, False]:
            cachedir = None
        else:
            cachedir = output_directory

        # Update the output directory folder if necessary
        if output_directory not in (None, Undefined) and output_directory:
            if self.process_output_directory:
                output_directory = os.path.join(
                    output_directory,
                    '%s-%s' % (self.process_counter, process_instance.name))
            # Guarantee that the output directory exists
            if not os.path.isdir(output_directory):
                os.makedirs(output_directory)
            if self.process_output_directory:
                if 'output_directory' in process_instance.user_traits():
                    if (process_instance.output_directory is Undefined
                            or not process_instance.output_directory):
                        process_instance.output_directory = output_directory

        returncode, log_file = run_process(
            output_directory,
            process_instance,
            cachedir=cachedir,
            generate_logging=self.generate_logging,
            verbose=verbose,
            **kwargs)

        # Increment the number of executed process count
        self.process_counter += 1
        return returncode
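The per-process sub-folder handling above can be summarized with a small self-contained sketch of the effective path computation (the helper name is illustrative and not part of capsul):

import os


def effective_output_directory(output_directory, process_counter,
                               process_name, per_process_subdir=True):
    """Mirror the sub-folder naming applied by _run() when the
    process_output_directory option is enabled."""
    if per_process_subdir:
        output_directory = os.path.join(
            output_directory, "%s-%s" % (process_counter, process_name))
    # _run() also creates the directory on demand
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
    return output_directory


# e.g. the first process named "smoothing" would write into /tmp/study/0-smoothing
print(effective_output_directory("/tmp/study", 0, "smoothing"))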
Code example #4
File: study_config.py  Project: cedrixic/capsul
    def _run(self, process_instance, output_directory, verbose, **kwargs):
        """ Method to execute a process in a study configuration environment.

        Parameters
        ----------
        process_instance: Process instance (mandatory)
            the process we want to execute
        output_directory: Directory name (optional)
            the output directory to use for process execution. This replaces
            self.output_directory but leaves it unchanged.
        verbose: int
            if different from zero, print console messages.
        """
        # Message
        logger.info("Study Config: executing process '{0}'...".format(
            process_instance.id))

        # Run
        if self.get_trait_value("use_smart_caching") in [None, False]:
            cachedir = None
        else:
            cachedir = output_directory

        # Update the output directory folder if necessary
        if output_directory not in (None, Undefined) and output_directory:
            if self.process_output_directory:
                output_directory = os.path.join(
                    output_directory,
                    '%s-%s' % (self.process_counter, process_instance.name))
            # Guarantee that the output directory exists
            if not os.path.isdir(output_directory):
                os.makedirs(output_directory)
            if self.process_output_directory:
                if 'output_directory' in process_instance.user_traits():
                    if (process_instance.output_directory is Undefined or
                            not process_instance.output_directory):
                        process_instance.output_directory = output_directory
        
        returncode, log_file = run_process(
            output_directory,
            process_instance,
            cachedir=cachedir,
            generate_logging=self.generate_logging,
            verbose=verbose,
            **kwargs)

        # Increment the number of executed process count
        self.process_counter += 1
        return returncode
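The caching switch shared by the _run() versions above boils down to the following sketch (illustrative only; the caching backend used by run_process itself is not shown):

def choose_cachedir(study_config, output_directory):
    # use_smart_caching unset or False -> no cache directory, so run_process()
    # re-executes the process unconditionally; otherwise cached results live
    # next to the outputs.
    if study_config.get_trait_value("use_smart_caching") in (None, False):
        return None
    return output_directory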
Code example #5
File: study_config.py  Project: servoz/capsul
    def run(self, process_or_pipeline, output_directory=None,
            execute_qc_nodes=True, verbose=0, configuration_dict=None,
            **kwargs):
        """Method to execute a process or a pipline in a study configuration
         environment.

         Depending on the studies_config settings, it may be a sequential run,
         or a parallel run, which can involve remote execution (through soma-
         workflow).

         Only pipeline nodes can be filtered on the 'execute_qc_nodes'
         attribute.

         A valid output directory is exepcted to execute the process or the
         pepeline without soma-workflow.

        Parameters
        ----------
        process_or_pipeline: Process or Pipeline instance (mandatory)
            the process or pipeline we want to execute
        output_directory: Directory name (optional)
            the output directory to use for process execution. This replaces
            self.output_directory but left it unchanged.
        execute_qc_nodes: bool (optional, default True)
            if True, execute process nodes that are tagged as quality control
            process nodes.
        verbose: int
            if different from zero, print console messages.
        configuration_dict: dict (optional)
            configuration dictionary
        """


        # Use soma workflow to execute the pipeline or process in parallel
        # on the local machine. This has now moved to CapsulEngine.
        if self.get_trait_value("use_soma_workflow"):
            return self.engine.check_call(process_or_pipeline, **kwargs)

        # here we only deal with the (obsolete) local execution mode.

        with self.run_lock:
            self.run_interruption_request = False

        # set parameters values
        for k, v in six.iteritems(kwargs):
            setattr(process_or_pipeline, k, v)
        # output_directory cannot be in kwargs
        if output_directory not in (None, Undefined) \
                and 'output_directory' in process_or_pipeline.traits():
            process_or_pipeline.output_directory = output_directory

        missing = process_or_pipeline.get_missing_mandatory_parameters()
        if len(missing) != 0:
            ptype = 'process'
            if isinstance(process_or_pipeline, Pipeline):
                ptype = 'pipeline'
            raise ValueError('In %s %s: missing mandatory parameters: %s'
                             % (ptype, process_or_pipeline.name,
                                ', '.join(missing)))


        # Use the local machine to execute the pipeline or process
        if output_directory is None or output_directory is Undefined:
            if 'output_directory' in process_or_pipeline.traits():
                output_directory = getattr(process_or_pipeline,
                                            'output_directory')
            if output_directory is None or output_directory is Undefined:
                output_directory = self.output_directory
        # Not all processes need an output_directory defined on StudyConfig
        if output_directory is not None \
                and output_directory is not Undefined:
            # Check the output directory is valid
            if not isinstance(output_directory, six.string_types):
                raise ValueError(
                    "'{0}' is not a valid directory. A valid output "
                    "directory is expected to run the process or "
                    "pipeline.".format(output_directory))
            try:
                if not os.path.isdir(output_directory):
                    os.makedirs(output_directory)
            except OSError:
                raise ValueError(
                    "Can't create folder '{0}', please investigate.".format(
                        output_directory))

        # Temporary files can be generated for pipelines
        temporary_files = []
        result = None
        try:
            # Generate ordered execution list
            execution_list = []
            if isinstance(process_or_pipeline, Pipeline):
                execution_list = \
                    process_or_pipeline.workflow_ordered_nodes()
                # Filter process nodes if necessary
                if not execute_qc_nodes:
                    execution_list = [node for node in execution_list
                                      if node.node_type == "processing_node"]
                for node in execution_list:
                    # check temporary outputs and allocate files
                    process_or_pipeline._check_temporary_files_for_node(
                        node, temporary_files)
            elif isinstance(process_or_pipeline, Process):
                execution_list.append(process_or_pipeline)
            else:
                raise Exception(
                    "Unknown instance type. Got {0}and expect Process or "
                    "Pipeline instances".format(
                        process_or_pipeline.__module__.name__))

            with self.run_lock:
                if self.run_interruption_request:
                    self.run_interruption_request = False
                    raise RuntimeError('Execution interruption requested')

            # Execute each process node element
            for process_node in execution_list:
                # Execute the process instance contained in the node
                if isinstance(process_node, Node):
                    result, log_file = run_process(
                        output_directory,
                        process_node.process,
                        generate_logging=self.generate_logging,
                        verbose=verbose,
                        configuration_dict=configuration_dict)

                # Execute the process instance
                else:
                    result, log_file = run_process(
                        output_directory,
                        process_node,
                        generate_logging=self.generate_logging,
                        verbose=verbose,
                        configuration_dict=configuration_dict)

                with self.run_lock:
                    if self.run_interruption_request:
                        self.run_interruption_request = False
                        raise RuntimeError('Execution interruption requested')

        finally:
            # Destroy temporary files
            if temporary_files:
                # If temporary files have been created, we are sure that
                # process_or_pipeline is a pipeline with a method
                # _free_temporary_files.
                process_or_pipeline._free_temporary_files(temporary_files)
        return result
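To close, a hypothetical end-to-end call of the run() method above on a pipeline; the pipeline identifier and paths are placeholders, and soma-workflow is assumed to be disabled so that the local execution branch shown above is taken:

from capsul.api import StudyConfig, get_process_instance

study_config = StudyConfig()
# Assumption: keep the obsolete local execution mode instead of soma-workflow.
study_config.use_soma_workflow = False

# Placeholder pipeline identifier; get_process_instance also accepts a
# "module.ClassName" string.
pipeline = get_process_instance("my_project.pipelines.MyPipeline")

# QC nodes are filtered out and the output directory is created if needed.
result = study_config.run(pipeline,
                          output_directory="/tmp/capsul_study",
                          execute_qc_nodes=False,
                          verbose=1)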