Code Example #1
            if self.prefix is not None:
                r = not import_from(self.prefix)
            elif prefix is not None:
                r = not import_from(prefix)
            else:
                r = (not import_from('vistrails.packages.') and
                     not import_from('userpackages.'))
        except Exception, e:
            raise self.LoadFailed(self, e, traceback.format_exc())

        if r:
            raise self.InitializationFailed(self, errors)

        self.set_properties()
        self.do_load_configuration()
        record_usage(loaded_package='%s %s' % (self.identifier, self.version))

    def initialize(self):
        if not self._loaded:
            raise VistrailsInternalError("Called initialize() on non-loaded "
                                         "Package %s" % self.codepath)

        self.check_requirements()

        try:
            name = self.prefix + self.codepath + '.init'
            try:
                __import__(name, globals(), locals(), [])
            except ImportError, e:
                # FIXME !!! Want to differentiate between .init not
                # existing and an error with an import in the .init
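The snippet above falls back across several import prefixes: it honors an explicit prefix when one is set, and otherwise tries the system-wide 'vistrails.packages.' location followed by 'userpackages.'. Below is a minimal sketch of that fallback pattern; the helper name import_from and the two prefixes come from the excerpt, while the codepath argument and everything else are illustrative assumptions, not the VisTrails implementation.

def import_from(prefix, codepath='my_package'):
    # Try to import prefix+codepath; return True on success so the
    # caller can fall back to the next prefix on failure (assumed
    # contract, inferred from 'r = not import_from(...)' above).
    try:
        __import__(prefix + codepath, globals(), locals(), [])
    except ImportError:
        return False
    return True

# Mirror the excerpt's no-prefix branch: system packages first,
# then the user's own packages directory.
loaded = (import_from('vistrails.packages.') or
          import_from('userpackages.'))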
Code Example #2
File: cached.py  Project: AnyarInc/VisTrails
    def execute(self, pipeline, **kwargs):
        """execute(pipeline, **kwargs):

        kwargs:
          controller = fetch('controller', None)
          locator = fetch('locator', None)
          current_version = fetch('current_version', None)
          view = fetch('view', DummyView())
          aliases = fetch('aliases', None)
          params = fetch('params', None)
          extra_info = fetch('extra_info', None)
          logger = fetch('logger', DummyLogController)
          reason = fetch('reason', None)
          actions = fetch('actions', None)
          done_summon_hooks = fetch('done_summon_hooks', [])
          module_executed_hook = fetch('module_executed_hook', [])
          job_monitor = fetch('job_monitor', None)

        Executes a pipeline using caching. Caching works by reusing
        pipelines directly.  This means that there exists one global
        pipeline whose parts get executed over and over again.

        This function returns a triple of dictionaries (objs, errs, execs).

        objs is a mapping from local ids (the ids in the pipeline) to
        objects **in the persistent pipeline**. Notice, these are not
        the objects inside the passed pipeline, but the objects they
        were mapped to in the persistent pipeline.

        errs is a dictionary from local ids to error messages of modules
        that might have raised errors.

        execs is a dictionary from local ids to boolean values indicating
        whether they were executed or not.

        If modules have no error associated with them but were not executed,
        it means they were cached."""

        # Setup named arguments. We don't use named parameters so
        # that positional parameter calls fail earlier
        new_kwargs = {}
        def fetch(name, default):
            new_kwargs[name] = r = kwargs.pop(name, default)
            return r
        controller = fetch('controller', None)
        locator = fetch('locator', None)
        current_version = fetch('current_version', None)
        view = fetch('view', DummyView())
        vistrail_variables = fetch('vistrail_variables', None)
        aliases = fetch('aliases', None)
        params = fetch('params', None)
        extra_info = fetch('extra_info', None)
        logger = fetch('logger', DummyLogController)
        sinks = fetch('sinks', None)
        reason = fetch('reason', None)
        actions = fetch('actions', None)
        done_summon_hooks = fetch('done_summon_hooks', [])
        module_executed_hook = fetch('module_executed_hook', [])
        stop_on_error = fetch('stop_on_error', True)
        parent_exec = fetch('parent_exec', None)
        job_monitor = fetch('job_monitor', None)

        if len(kwargs) > 0:
            raise VistrailsInternalError('Wrong parameters passed '
                                         'to execute: %s' % kwargs)
        self.clean_non_cacheable_modules()

        record_usage(execute=True)

#         if controller is not None:
#             vistrail = controller.vistrail
#             (pipeline, module_remap) = \
#                 core.db.io.expand_workflow(vistrail, pipeline)
#             new_kwargs['module_remap'] = module_remap
#         else:
#             vistrail = None

        if controller is not None:
            vistrail = controller.vistrail
        else:
            vistrail = None

        time_start = time.time()
        logger = logger.start_workflow_execution(
                parent_exec,
                vistrail, pipeline, current_version)
        new_kwargs['logger'] = logger
        self.annotate_workflow_execution(logger, reason, aliases, params)

        res = self.setup_pipeline(pipeline, **new_kwargs)
        modules_added = res[2]
        conns_added = res[3]
        to_delete = res[4]
        errors = res[5]
        if len(errors) == 0:
            res = self.execute_pipeline(pipeline, *(res[:2]), **new_kwargs)
        else:
            res = (to_delete, res[0], errors, {}, {}, {}, [])
            for (i, error) in errors.iteritems():
                view.set_module_error(i, error.msg, error.errorTrace)
        self.finalize_pipeline(pipeline, *(res[:-1]), **new_kwargs)
        time_end = time.time()

        result = InstanceObject(objects=res[1],
                                errors=res[2],
                                executed=res[3],
                                suspended=res[4],
                                parameter_changes=res[6],
                                modules_added=modules_added,
                                conns_added=conns_added)

        logger.finish_workflow_execution(result.errors, suspended=result.suspended)

        record_usage(time=time_end - time_start, modules=len(res[1]),
                     errors=len(res[2]), executed=len(res[3]),
                     suspended=len(res[4]))

        return result
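Only the keyword names and the fields of the returned InstanceObject are documented above; in the following usage sketch, interpreter and pipeline are assumed stand-ins for a CachedInterpreter instance and a VisTrails pipeline, so this illustrates the calling convention rather than standing alone as runnable code.

# Hedged usage sketch; 'interpreter' and 'pipeline' are assumed to exist.
result = interpreter.execute(pipeline,
                             reason='console run',
                             stop_on_error=True)

# result bundles the triple described in the docstring, keyed by the
# pipeline's local module ids; error objects carry .msg per the excerpt.
for module_id, was_run in result.executed.iteritems():
    if module_id in result.errors:
        print 'module %s failed: %s' % (module_id, result.errors[module_id].msg)
    elif not was_run:
        print 'module %s came from the cache' % module_id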
Code Example #3
File: cached.py  Project: AnyarInc/VisTrails
                    me.module.logging.end_update(me.module, me)
                    logging_obj.signalError(me.module, me)
                    abort = abort or me.abort
            except ModuleError, me:
                me.module.logging.end_update(me.module, me, me.errorTrace)
                logging_obj.signalError(me.module, me)
                abort = me.abort
            except ModuleBreakpoint, mb:
                mb.module.logging.end_update(mb.module)
                logging_obj.signalError(mb.module, mb)
                abort = True
            if stop_on_error or abort:
                break

        if Generator.generators:
            record_usage(generators=len(Generator.generators))
        # execute all generators until inputs are exhausted
        # this makes sure branching and multiple sinks are executed correctly
        if not logging_obj.errors and not logging_obj.suspended and \
                                                          Generator.generators:
            result = True
            abort = False
            while result is not None:
                try:
                    for m in Generator.generators:
                        result = m.generator.next()
                    continue
                except AbortExecution:
                    break
                except ModuleErrors, mes:
                    for me in mes.module_errors:
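The generator-draining loop above steps every registered generator once per pass and stops when a pass ends with a None result (or when execution aborts or a module raises). A standalone sketch of that round-robin pattern follows, stripped of the logging and error handling, with an assumed convention that an exhausted generator keeps yielding None:

def drain(generators):
    # Step each generator once per pass, exactly like the
    # 'while result is not None' loop in the excerpt.
    result = True
    while result is not None:
        for gen in generators:
            result = gen.next()  # Python 2 generator protocol

def counted(n):
    for i in xrange(n):
        yield i
    while True:
        yield None  # keep signalling exhaustion on later passes

# Both generators advance in lockstep; the loop ends once a full
# pass closes with None (here, after the longer one finishes).
drain([counted(3), counted(5)])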