Example #1
    def _run(self, task_id, task_type):
        LOG.debug('Taskflow executor picked up the execution of task ID '
                  '%(task_id)s of task type '
                  '%(task_type)s' % {'task_id': task_id,
                                     'task_type': task_type})

        task = script_utils.get_task(self.task_repo, task_id)
        if task is None:
            # NOTE: This happens if the task is not found in the database. In
            # such cases, there is no way to update the task status, so it's
            # ignored here.
            return

        flow = self._get_flow(task)

        try:
            with self._executor() as executor:
                engine = engines.load(flow, self.engine_conf,
                                      executor=executor, **self.engine_kwargs)
                with llistener.DynamicLoggingListener(engine, log=LOG):
                    engine.run()
        except Exception as exc:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') %
                          {'task_id': task_id, 'exc': exc.message})
Example #2
    def execute(self, actions):
        try:
            # NOTE(jed) We want a strong separation of concerns between the
            # Watcher planner and the Watcher Applier, so that several
            # workflow engines can be supported. We provide the 'taskflow'
            # engine by default while still leaving users the possibility
            # to change it.
            # todo(jed) we need to change the way the actions are stored.
            # The current implementation only uses a linked list of actions.
            # todo(jed) add oslo conf for retry and name (a minimal retry
            # sketch follows this example)
            flow = gf.Flow("watcher_flow")
            previous = None
            for a in actions:
                task = TaskFlowActionContainer(a, self)
                flow.add(task)
                if previous is None:
                    previous = task
                    # we have only one Action in the Action Plan
                    if len(actions) == 1:
                        nop = TaskFlowNop()
                        flow.add(nop)
                        flow.link(previous, nop)
                else:
                    # decider == guard (UML)
                    flow.link(previous, task, decider=self.decider)
                    previous = task

            e = engines.load(flow)
            e.run()

        except Exception as e:
            raise exception.WorkflowExecutionException(error=e)
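The retry behaviour mentioned in the TODO above is not wired up in this snippet. As a rough sketch only, attaching taskflow's stock retry.Times controller to a flow could look like the following (plain taskflow API rather than Watcher's wrappers; the task and flow names are made up):

from taskflow import engines
from taskflow import retry
from taskflow import task
from taskflow.patterns import linear_flow as lf


class FlakyAction(task.Task):
    def execute(self):
        print("running %s" % self.name)


# retry.Times re-runs the reverted flow up to 'attempts' times before
# letting the failure propagate to the caller.
flow = lf.Flow("retried_actions", retry=retry.Times(attempts=3))
flow.add(FlakyAction("action-1"))
engines.load(flow).run()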
Example #3
def main(*args):
    """Main method of artman."""
    # If no arguments are sent, we are using the entry point; derive
    # them from sys.argv.
    if not args:
        args = sys.argv[1:]

    # Get to a normalized set of arguments.
    flags = parse_args(*args)
    user_config = loader.read_user_config(flags.user_config)
    _adjust_root_dir(flags.root_dir)
    pipeline_name, pipeline_kwargs = normalize_flags(flags, user_config)

    if flags.local:
        try:
            pipeline = pipeline_factory.make_pipeline(pipeline_name,
                                                      **pipeline_kwargs)
            # Hardcoded to run the pipeline with the serial engine, though it need not be.
            engine = engines.load(
                pipeline.flow, engine='serial', store=pipeline.kwargs)
            engine.run()
        except:
            logger.error(traceback.format_exc())
            sys.exit(32)
        finally:
            _change_owner(flags, pipeline_name, pipeline_kwargs)
    else:
        support.check_docker_requirements(flags.image)
        # Note: artman currently won't work if input directory doesn't contain
        # common-protos.
        logger.info('Running artman command in a Docker instance.')
        _run_artman_in_docker(flags)
Example #4
def main(args):
    pipeline_name, pipeline_kwargs, env, local_repo = _parse_args(args)

    if local_repo:
        pipeline_kwargs = _load_local_repo(local_repo, **pipeline_kwargs)

    if env:
        # Execute pipeline task remotely based on the specified env param.
        pipeline = pipeline_factory.make_pipeline(
            pipeline_name, True, **pipeline_kwargs)
        jb = job_util.post_remote_pipeline_job_and_wait(pipeline, env)
        task_details, flow_detail = job_util.fetch_job_status(jb, env)

        for task_detail in task_details:
            if task_detail.name == 'BlobUploadTask' and task_detail.results:
                bucket_name, path, _ = task_detail.results
                pipeline_util.download_from_gcs(
                    bucket_name,
                    path,
                    os.path.join(tempfile.gettempdir(), 'artman-remote'))

        if flow_detail.state != 'SUCCESS':
            # Print the remote log if the pipeline execution completed with a
            # non-SUCCESS status.
            _print_log(pipeline_kwargs['pipeline_id'])

    else:
        pipeline = pipeline_factory.make_pipeline(
            pipeline_name, False, **pipeline_kwargs)
        # Hardcoded to run the pipeline with the serial engine, though it need not be.
        engine = engines.load(pipeline.flow, engine='serial',
                              store=pipeline.kwargs)
        engine.run()
Example #5
def run_update_property_flow(property_spec, update_type, update_info_list):
    e = engines.load(
        update_property_flow(),
        store={"property_spec": property_spec, "update_type": update_type, "update_info_list": update_info_list},
        engine="serial",
    )
    e.run()
Example #6
def main(*args):
    """Main method of artman."""
    # If no arguments are sent, we are using the entry point; derive
    # them from sys.argv.
    if not args:
        args = sys.argv[1:]

    # Get to a normalized set of arguments.
    flags = parse_args(*args)
    user_config = loader.read_user_config(flags.user_config)
    _adjust_root_dir(flags.root_dir)
    pipeline_name, pipeline_kwargs = normalize_flags(flags, user_config)

    if flags.local:
        try:
            pipeline = pipeline_factory.make_pipeline(pipeline_name, False,
                                                      **pipeline_kwargs)
            # Hardcoded to run the pipeline with the serial engine, though it need not be.
            engine = engines.load(
                pipeline.flow, engine='serial', store=pipeline.kwargs)
            engine.run()
        except:
            logger.error(traceback.format_exc())
            sys.exit(32)
        finally:
            _change_owner(flags, pipeline_name, pipeline_kwargs)
    else:
        support.check_docker_requirements(flags.image)
        # Note: artman currently won't work if input directory doesn't contain
        # shared configuration files (e.g. gapic/packaging/dependencies.yaml).
        # This will make artman less useful for non-Google APIs.
        # TODO(ethanbao): Fix that by checking the input directory and
        # pulling the shared configuration files if necessary.
        logger.info('Running artman command in a Docker instance.')
        _run_artman_in_docker(flags)
Example #7
def execute_flow(flow):
    """
    Create all necessary prerequisites like task database and thread pool and
    execute TaskFlow flow.
    :param flow: TaskFlow flow instance
    """
    backend = backends.fetch({
        'connection': 'sqlite:///' + TASK_DATABASE_FILE,
        'isolation_level': 'SERIALIZABLE'
    })
    executor = futurist.ThreadPoolExecutor(max_workers=MAX_WORKERS)
    conn = backend.get_connection()
    logbook, flow_detail = _ensure_db_initialized(conn, flow)
    engine = engines.load(
        flow, flow_detail=flow_detail, backend=backend, book=logbook,
        engine='parallel', executor=executor)

    engine.compile()
    _workaround_reverted_reset(flow_detail)
    try:
        engine.run()
    except exceptions.WrappedFailure as wf:
        for failure in wf:
            if failure.exc_info is not None:
                traceback.print_exception(*failure.exc_info)
            else:
                print(failure)
Example #8
def run_flow(flow_name, init_params=None):
    """ run the tasks in given flow name
    """
    """ actual taskflow runner
    """
    if flow_name not in app_task_flows:
        raise Exception('taskflow-%s not definied' % flow_name)

    flow = lflow.Flow(flow_name)

    for task_cls, _ in app_task_flows[flow_name]:
        task_params = getattr(task_cls, 'properties')
        if isinstance(task_params, dict):
            flow.add(task_cls(**task_params))

    eng = engines.load(flow, store=init_params or {})

    if sys.version_info > (2, 7):
        with printing.PrintingListener(eng), timing.PrintingDurationListener(eng):
            eng.run()
    else:
        with nested(printing.PrintingListener(eng), timing.PrintingDurationListener(eng)):
            eng.run()

    return eng.storage.fetch_all()
Example #9
    def _run(self, task_id, task_type):
        LOG.debug('Taskflow executor picked up the execution of task ID '
                  '%(task_id)s of task type '
                  '%(task_type)s', {'task_id': task_id,
                                    'task_type': task_type})

        task = script_utils.get_task(self.task_repo, task_id)
        if task is None:
            # NOTE: This happens if the task is not found in the database. In
            # such cases, there is no way to update the task status, so it's
            # ignored here.
            return

        flow = self._get_flow(task)
        executor = self._fetch_an_executor()
        try:
            engine = engines.load(
                flow,
                engine=CONF.taskflow_executor.engine_mode, executor=executor,
                max_workers=CONF.taskflow_executor.max_workers)
            with llistener.DynamicLoggingListener(engine, log=LOG):
                engine.run()
        except Exception as exc:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') %
                          {'task_id': task_id,
                           'exc': encodeutils.exception_to_unicode(exc)})
                # TODO(sabari): Check for specific exceptions and update the
                # task failure message.
                task.fail(_('Task failed due to Internal Error'))
                self.task_repo.save(task)
        finally:
            if executor is not None:
                executor.shutdown()
Example #10
    def _run(self, task_id, task_type):
        LOG.debug(
            "Taskflow executor picked up the execution of task ID "
            "%(task_id)s of task type "
            "%(task_type)s" % {"task_id": task_id, "task_type": task_type}
        )

        task = script_utils.get_task(self.task_repo, task_id)
        if task is None:
            # NOTE: This happens if the task is not found in the database. In
            # such cases, there is no way to update the task status, so it's
            # ignored here.
            return

        flow = self._get_flow(task)

        try:
            with self._executor() as executor:
                engine = engines.load(flow, self.engine_conf, executor=executor, **self.engine_kwargs)
                with llistener.DynamicLoggingListener(engine, log=LOG):
                    engine.run()
        except Exception as exc:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to execute task %(task_id)s: %(exc)s") % {"task_id": task_id, "exc": exc.message})
                # TODO(sabari): Check for specific exceptions and update the
                # task failure message.
                task.fail(_("Task failed due to Internal Error"))
                self.task_repo.save(task)
Example #11
def execute(input_data=None):
    print("input Data" + str(input_data) if input_data else "nothing")
    flow = get_flow(input_data)

    eng = engines.load(flow, engine_conf='parallel')
    result = eng.run()
    return result
Example #12
    def deploy(self):
        """
        deploy image in compute node, return the origin path to create snapshot
        :returns origin_path: origin path to create snapshot
        """
        LOG.debug("Virtman: in deploy_base_image, image name = %s, "
                  "multipath_path = %s, origin_path = %s, cached_path = %s, "
                  "is_login = %s" %
                  (self.image_name, self.multipath_path,
                   self.origin_path, self.cached_path,
                   self.is_login))

        # Check if it had origin or not!
        if self.origin_path:
            return self.origin_path

        # check local image and save the image connections
        self.check_local_image()

        # Reform connections
        # If it has image on the local node or no path to connect, connect to
        # root
        parent_connections = self.modify_parent_connection()

        # rebuild multipath
        self.rebuild_multipath(parent_connections)

        # build_chain = Chain()
        # build_chain.add_step(
        #     partial(Cache.create_cache, base_image),
        #     partial(Cache.delete_cache, base_image))
        # build_chain.add_step(
        #     partial(Origin.create_origin, base_image),
        #     partial(Origin.delete_origin, base_image))
        # build_chain.add_step(
        #     partial(Target.create_target, base_image),
        #     partial(Target.delete_target, base_image))
        # build_chain.add_step(
        #     partial(Register.login_master, base_image),
        #     partial(Register.logout_master, base_image))
        # build_chain.do()

        wf = linear_flow.Flow("base_image_flow")
        wf.add(CreateCacheTask(),
               CreateOriginTask(),
               CreateTargetTask(),
               LoginMasterTask()
               )

        dict_for_task = dict(base_image=self)
        en = engines.load(wf, store=dict_for_task)
        en.run()

        LOG.debug("Virtman: baseimage OK!\n"
                  "target_id =  %s, origin_path = %s, origin_name = %s, "
                  "cached_path = %s, multipath_path = %s, multipath_name = %s" %
                  (self.target_id, self.origin_path,
                   self.origin_name, self.cached_path,
                   self.multipath_path, self.multipath_name))
Example #13
    def test_formatted_via_listener(self, mock_format_node):
        mock_format_node.return_value = 'A node'

        flo = self._make_test_flow()
        e = engines.load(flo)
        with logging_listener.DynamicLoggingListener(e):
            self.assertRaises(RuntimeError, e.run)
        self.assertTrue(mock_format_node.called)
Example #14
    def test_checks_for_dups_globally(self):
        flo = gf.Flow("test").add(
            gf.Flow("int1").add(test_utils.DummyTask(name="a")),
            gf.Flow("int2").add(test_utils.DummyTask(name="a")))
        e = engines.load(flo)
        self.assertRaisesRegexp(exc.Duplicate,
                                '^Atoms with duplicate names',
                                e.compile)
Example #15
def execute(input_data=None):
    flow = get_flow(input_data)

    #TODO: need to figure out a better way to allow user to specify
    #TODO: specific resource to migrate

    eng = engines.load(flow)
    result = eng.run()
    return result
Example #16
def calculate(engine_conf):
    # Subdivide the work into X pieces, then request each worker to calculate
    # one of those chunks and then later we will write these chunks out to
    # an image bitmap file.

    # An unordered flow is used here since the mandelbrot calculation is an
    # example of an embarrassingly parallel computation that we can scatter
    # across as many workers as possible.
    flow = uf.Flow("mandelbrot")

    # These symbols will be automatically given to tasks as input to their
    # execute method; in this case these are constants used in the mandelbrot
    # calculation.
    store = {
        'mandelbrot_config': [-2.0, 1.0, -1.0, 1.0, MAX_ITERATIONS],
        'image_config': {
            'size': IMAGE_SIZE,
        }
    }

    # We need the task names to be in the right order so that we can extract
    # the final results in the right order (we don't care about the order when
    # executing).
    task_names = []

    # Compose our workflow.
    height, _width = IMAGE_SIZE
    chunk_size = int(math.ceil(height / float(CHUNK_COUNT)))
    for i in compat_range(0, CHUNK_COUNT):
        chunk_name = 'chunk_%s' % i
        task_name = "calculation_%s" % i
        # Break the calculation up into chunk size pieces.
        rows = [i * chunk_size, i * chunk_size + chunk_size]
        flow.add(
            MandelCalculator(task_name,
                             # This ensures the storage symbol with name
                             # 'chunk_name' is sent into the tasks local
                             # symbol 'chunk'. This is how we give each
                             # calculator its own correct sequence of rows
                             # to work on.
                             rebind={'chunk': chunk_name}))
        store[chunk_name] = rows
        task_names.append(task_name)

    # Now execute it.
    eng = engines.load(flow, store=store, engine_conf=engine_conf)
    eng.run()

    # Gather all the results and order them for further processing.
    gather = []
    for name in task_names:
        gather.extend(eng.storage.get(name))
    points = []
    for y, row in enumerate(gather):
        for x, color in enumerate(row):
            points.append(((x, y), color))
    return points
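The rebind mapping above (store symbol 'chunk_N' delivered to each task's local 'chunk' argument) is the key mechanism here. A stripped-down sketch of the same idea, with made-up task and symbol names and assuming only stock taskflow:

from taskflow import engines
from taskflow import task
from taskflow.patterns import unordered_flow as uf


class Doubler(task.Task):
    def execute(self, chunk):
        return [2 * v for v in chunk]


flow = uf.Flow("doubling")
store = {}
task_names = []
for i in range(3):
    chunk_name = 'chunk_%s' % i
    task_name = 'double_%s' % i
    store[chunk_name] = [i, i + 1, i + 2]
    # rebind routes the storage symbol 'chunk_<i>' into the 'chunk' argument.
    flow.add(Doubler(task_name, rebind={'chunk': chunk_name}))
    task_names.append(task_name)

eng = engines.load(flow, store=store)
eng.run()
# Results are stored per task name, just like in the example above.
print([eng.storage.get(name) for name in task_names])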
Example #17
def main():
  pipeline_name, pipeline_kwargs, remote_mode = _parse_args()
  pipeline = pipeline_factory.make_pipeline(pipeline_name, **pipeline_kwargs)

  if remote_mode:
    job_util.post_remote_pipeline_job(pipeline)
  else:
    # Hardcoded to execute the pipeline with the serial engine, though it need not be.
    engine = engines.load(pipeline.flow, engine="serial", store=pipeline.kwargs)
    engine.run()
Example #18
def run(engine_options):
    flow = lf.Flow('simple-linear').add(
        utils.TaskOneArgOneReturn(provides='result1'),
        utils.TaskMultiArgOneReturn(provides='result2')
    )
    eng = engines.load(flow,
                       store=dict(x=111, y=222, z=333),
                       engine='worker-based', **engine_options)
    eng.run()
    return eng.storage.fetch_all()
Example #19
    def _taskflow_load(self, flow, **kwargs):
        eng = tf_engines.load(
            flow,
            engine_conf=CONF.task_flow.engine,
            executor=self.executor,
            **kwargs)
        eng.compile()
        eng.prepare()

        return eng
Example #20
    def _taskflow_load(self, flow, **kwargs):
        eng = tf_engines.load(
            flow,
            engine=CONF.task_flow.engine,
            executor=self.executor,
            never_resolve=CONF.task_flow.disable_revert,
            **kwargs)
        eng.compile()
        eng.prepare()

        return eng
Example #21
def run_check_cert_status_and_update_flow(domain_name, cert_type, flavor_id,
                                          project_id):
    e = engines.load(
        check_cert_status_and_update_flow(),
        store={
            'domain_name': domain_name,
            'cert_type': cert_type,
            'flavor_id': flavor_id,
            'project_id': project_id
        },
        engine='serial')
    e.run()
Example #22
    def start(self, *req, **kwargs):
        self.logger.info("--- Start Recovery ---")
        flow = self.prepare()
        eng = engines.load(flow)
        eng.run()
        # results = eng.storage.fetch_all()
        # self.logger.debug(results)

        start_vms(self.nova_handler.instance_ids)
        neutron_port_db = DRNeutronPortDao()
        associate_floatingips(neutron_port_db.get_ports_associated())
        self.logger.info("Secondary site is active")
        return ['Hello Recovery']
Example #23
    def test_exc_info_format(self):
        flo = self._make_test_flow()
        e = engines.load(flo)
        self.assertRaises(RuntimeError, e.run)

        fails = e.storage.get_execute_failures()
        self.assertEqual(1, len(fails))
        self.assertIn('Broken', fails)
        fail = fails['Broken']

        f = formatters.FailureFormatter(e)
        (exc_info, details) = f.format(fail, self._broken_atom_matcher)
        self.assertEqual(3, len(exc_info))
        self.assertEqual("", details)
Example #24
def main():
    if len(sys.argv) == 2:
        tbl = []
        with open(sys.argv[1], 'rb') as fh:
            reader = csv.reader(fh)
            for row in reader:
                tbl.append([float(r) if r else 0.0 for r in row])
    else:
        # Make some random table out of thin air...
        tbl = []
        cols = random.randint(1, 100)
        rows = random.randint(1, 100)
        for _i in compat_range(0, rows):
            row = []
            for _j in compat_range(0, cols):
                row.append(random.random())
            tbl.append(row)

    # Generate the work to be done.
    f = make_flow(tbl)

    # Now run it (using the specified executor)...
    try:
        executor = futurist.GreenThreadPoolExecutor(max_workers=5)
    except RuntimeError:
        # No eventlet currently active, use real threads instead.
        executor = futurist.ThreadPoolExecutor(max_workers=5)
    try:
        e = engines.load(f, engine='parallel', executor=executor)
        for st in e.run_iter():
            print(st)
    finally:
        executor.shutdown()

    # Find the old rows and put them into place...
    #
    # TODO(harlowja): probably easier just to sort instead of search...
    computed_tbl = []
    for i in compat_range(0, len(tbl)):
        for t in f:
            if t.index == i:
                computed_tbl.append(e.storage.get(t.name))

    # Do some basic validation (which causes the return code of this process
    # to be different if things were not as expected...)
    if len(computed_tbl) != len(tbl):
        return 1
    else:
        return 0
Example #25
    def get_engine(self, flow, **kwargs):
        if flow is None:
            LOG.error(_LE("Flow is None, build it first"))
            return
        executor = kwargs.get('executor', None)
        engine = kwargs.get('engine', None)
        store = kwargs.get('store', None)
        if not executor:
            executor = futurist.GreenThreadPoolExecutor()
        if not engine:
            engine = 'parallel'
        flow_engine = engines.load(flow,
                                   executor=executor,
                                   engine=engine,
                                   store=store)
        return flow_engine
Example #26
    def get_engine(self, flow, **kwargs):
        if flow is None:
            LOG.error(_LE("The flow is None, build it first"))
            raise exception.InvalidTaskFlowObject(reason="The flow is None")
        executor = kwargs.get('executor', None)
        engine = kwargs.get('engine', None)
        store = kwargs.get('store', None)
        if not executor:
            executor = futurist.GreenThreadPoolExecutor()
        if not engine:
            engine = 'parallel'
        flow_engine = engines.load(flow,
                                   executor=executor,
                                   engine=engine,
                                   store=store)
        return flow_engine
Example #27
def main(*args):
    # If no arguments are sent, we are using the entry point; derive
    # them from sys.argv.
    if not args:
        args = sys.argv[1:]

    # Get to a normalized set of arguments.
    flags = parse_args(*args)
    user_config = read_user_config(flags)
    pipeline_name, pipeline_kwargs, env = normalize_flags(flags, user_config)

    # Flesh out the pipeline arguments with information gleaned from
    # loading the appropriate config in the googleapis local repo.
    pipeline_kwargs = _load_local_repo(
        pipeline_kwargs['local_paths']['googleapis'],
        **pipeline_kwargs
    )

    if flags.remote:
        # Execute pipeline task remotely based on the specified env param.
        pipeline = pipeline_factory.make_pipeline(
            pipeline_name, True, **pipeline_kwargs)
        jb = job_util.post_remote_pipeline_job_and_wait(pipeline, env)
        task_details, flow_detail = job_util.fetch_job_status(jb, env)

        for task_detail in task_details:
            if (task_detail.name.startswith('BlobUploadTask') and
                        task_detail.results):
                bucket_name, path, _ = task_detail.results
                pipeline_util.download_from_gcs(
                    bucket_name,
                    path,
                    os.path.join(tempfile.gettempdir(), 'artman-remote'))

        if flow_detail.state != 'SUCCESS':
            # Print the remote log if the pipeline execution completed with a
            # non-SUCCESS status.
            _print_log(pipeline_kwargs['pipeline_id'])

    else:
        pipeline = pipeline_factory.make_pipeline(
            pipeline_name, False, **pipeline_kwargs)
        # Hardcoded to run the pipeline with the serial engine, though it need not be.
        engine = engines.load(pipeline.flow, engine='serial',
                              store=pipeline.kwargs)
        engine.run()
        _chown_for_artman_output()
Example #28
    def test_exc_info_with_details_format_hidden(self, mock_get_execute):
        flo = self._make_test_flow()
        e = engines.load(flo)
        self.assertRaises(RuntimeError, e.run)
        fails = e.storage.get_execute_failures()
        self.assertEqual(1, len(fails))
        self.assertIn('Broken', fails)
        fail = fails['Broken']

        # Doing this allows the details to be shown...
        e.storage.set_atom_intention("Broken", states.EXECUTE)
        hide_inputs_outputs_of = ['Broken', "Happy-1", "Happy-2"]
        f = formatters.FailureFormatter(
            e, hide_inputs_outputs_of=hide_inputs_outputs_of)
        (exc_info, details) = f.format(fail, self._broken_atom_matcher)
        self.assertEqual(3, len(exc_info))
        self.assertFalse(mock_get_execute.called)
Example #29
    def post(self, queue_name, messages, client_uuid, project=None):
        """Send messages to the subscribers."""
        if self.subscription_controller:
            subscribers = self.subscription_controller.list(queue_name,
                                                            project)

            wh_flow = uf.Flow('webhook_notifier_flow')

            for s in list(next(subscribers)):
                for m in messages:
                    wh_flow.add(self._generate_task(s['subscriber'], m))

            e = engines.load(wh_flow, executor=self.executor,
                             engine='parallel')
            e.run()
        else:
            LOG.error('Failed to get subscription controller.')
Example #30
    def test_exc_info_with_details_format(self, mock_format_node):
        mock_format_node.return_value = 'A node'

        flo = self._make_test_flow()
        e = engines.load(flo)
        self.assertRaises(RuntimeError, e.run)
        fails = e.storage.get_execute_failures()
        self.assertEqual(1, len(fails))
        self.assertIn('Broken', fails)
        fail = fails['Broken']

        # Doing this allows the details to be shown...
        e.storage.set_atom_intention("Broken", states.EXECUTE)
        f = formatters.FailureFormatter(e)
        (exc_info, details) = f.format(fail, self._broken_atom_matcher)
        self.assertEqual(3, len(exc_info))
        self.assertTrue(mock_format_node.called)
Example #31
        # stored). The combination of these 2 objects' unique ids (uuids) allows
        # the users of taskflow to reassociate the workflows that were
        # potentially running (and which may have partially completed) back
        # with taskflow so that those workflows can be resumed (or reverted)
        # after a process/thread/engine has failed in some way.
        logbook = p_utils.temporary_log_book(backend)
        flow_detail = p_utils.create_flow_detail(flow, logbook, backend)
        print("!! Your tracking id is: '%s+%s'" % (logbook.uuid,
                                                   flow_detail.uuid))
        print("!! Please submit this on later runs for tracking purposes")
    else:
        flow_detail = find_flow_detail(backend, book_id, flow_id)

    # Load and run.
    engine_conf = {
        'engine': 'serial',
    }
    engine = engines.load(flow,
                          flow_detail=flow_detail,
                          backend=backend,
                          engine_conf=engine_conf)
    engine.run()

# How to use.
#
# 1. $ python me.py "sqlite:////tmp/cinder.db"
# 2. ctrl-c before this finishes
# 3. Find the tracking id (search for 'Your tracking id is')
# 4. $ python me.py "sqlite:////tmp/cinder.db" "$tracking_id"
# 5. Profit!
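find_flow_detail is referenced above but not included in this excerpt. A plausible helper, assuming the tracking id is simply the logbook uuid plus flow detail uuid printed by the snippet and using taskflow's persistence connection API, might look like:

def find_flow_detail(backend, book_id, flow_id):
    # Fetch the logbook by its uuid, then locate the matching flow detail
    # inside it (LogBook.find looks up a flow detail by uuid).
    conn = backend.get_connection()
    logbook = conn.get_logbook(book_id)
    return logbook.find(flow_id)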
Example #32
# it using an in-memory backend and pre/post run we dump out the contents
# of the in-memory backends tree structure (which can be quite useful to
# look at for debugging or other analysis).


class PrintTask(task.Task):
    def execute(self):
        print("Running '%s'" % self.name)


# Make a little flow and run it...
f = lf.Flow('root')
for alpha in ['a', 'b', 'c']:
    f.add(PrintTask(alpha))

e = engines.load(f)
e.compile()
e.prepare()

# After prepare the storage layer + backend can now be accessed safely...
backend = e.storage.backend

print("----------")
print("Before run")
print("----------")
print(backend.memory.pformat())
print("----------")

e.run()

print("---------")
Example #33
from taskflow import engines
from taskflow.listeners import logging as logging_listener
from taskflow.patterns import linear_flow as lf
from taskflow import task

# INTRO: This example walks through a miniature workflow which will do a
# simple echo operation; during this execution a listener is associated with
# the engine to receive all notifications about what the flow has performed,
# this example dumps that output to the stdout for viewing (at debug level
# to show all the information which is possible).


class Echo(task.Task):
    def execute(self):
        print(self.name)


# Generate the work to be done (but don't do it yet).
wf = lf.Flow('abc')
wf.add(Echo('a'))
wf.add(Echo('b'))
wf.add(Echo('c'))

# This will associate the listener with the engine (the listener
# will automatically register for notifications with the engine and deregister
# when the context is exited).
e = engines.load(wf)
with logging_listener.DynamicLoggingListener(e):
    e.run()
Example #34
    if not all([book_id, flow_id]):
        # If no 'tracking id' (think a fedex or ups tracking id) is provided
        # then we create one by creating a logbook (where flow details are
        # stored) and creating a flow detail (where flow and task state is
        # stored). The combination of these 2 objects' unique ids (uuids) allows
        # the users of taskflow to reassociate the workflows that were
        # potentially running (and which may have partially completed) back
        # with taskflow so that those workflows can be resumed (or reverted)
        # after a process/thread/engine has failed in some way.
        logbook = p_utils.temporary_log_book(backend)
        flow_detail = p_utils.create_flow_detail(flow, logbook, backend)
        print("!! Your tracking id is: '%s+%s'" % (logbook.uuid,
                                                   flow_detail.uuid))
        print("!! Please submit this on later runs for tracking purposes")
    else:
        flow_detail = find_flow_detail(backend, book_id, flow_id)

    # Load and run.
    engine = engines.load(flow,
                          flow_detail=flow_detail,
                          backend=backend, engine='serial')
    engine.run()

# How to use.
#
# 1. $ python me.py "sqlite:////tmp/cinder.db"
# 2. ctrl-c before this finishes
# 3. Find the tracking id (search for 'Your tracking id is')
# 4. $ python me.py "sqlite:////tmp/cinder.db" "$tracking_id"
# 5. Profit!
Example #35
        else:
            f.add(
                EchoTask(name="echoer_%s" % curr_value,
                         rebind={'value': curr_value}))
        curr_value = next_value
    return f


# Adjust this number to change how many engines/flows run at once.
flow_count = 1
flows = []
for i in range(0, flow_count):
    f = make_alphabet_flow(i + 1)
    flows.append(make_alphabet_flow(i + 1))
be = persistence_backends.fetch(conf={'connection': 'memory'})
book = persistence_utils.temporary_log_book(be)
engine_iters = []
for f in flows:
    fd = persistence_utils.create_flow_detail(f, book, be)
    e = engines.load(f, flow_detail=fd, backend=be, book=book)
    e.compile()
    e.storage.inject({'A': 'A'})
    e.prepare()
    engine_iters.append(e.run_iter())
while engine_iters:
    for it in list(engine_iters):
        try:
            print(six.next(it))
        except StopIteration:
            engine_iters.remove(it)
Example #36
song.add(
    PrinterTask("conductor@begin",
                show_name=False,
                inject={'output': "*ding*"}), hi_chorus, world_chorus,
    PrinterTask("conductor@end", show_name=False, inject={'output': "*dong*"}))

# Run in parallel using eventlet green threads...
try:
    import eventlet as _eventlet  # noqa
except ImportError:
    # No eventlet currently active, skip running with it...
    pass
else:
    print("-- Running in parallel using eventlet --")
    e = engines.load(song,
                     executor='greenthreaded',
                     engine='parallel',
                     max_workers=1)
    e.run()

# Run in parallel using real threads...
print("-- Running in parallel using threads --")
e = engines.load(song, executor='threaded', engine='parallel', max_workers=1)
e.run()

# Run in parallel using external processes...
print("-- Running in parallel using processes --")
e = engines.load(song, executor='processes', engine='parallel', max_workers=1)
e.run()

# Run serially (aka, if the workflow could have been run in parallel, it will
# not be when run in this mode)...
Example #37
def run(engine_options):
    reporter = EventReporter()
    reporter.notifier.register(notifier.Notifier.ANY, event_receiver)
    flow = lf.Flow('event-reporter').add(reporter)
    eng = engines.load(flow, engine='worker-based', **engine_options)
    eng.run()
Example #38
from taskflow import task
from taskflow import engines
from taskflow.patterns import linear_flow


def exec(x):
    print(x * 2)
    raise IOError


def rev(x, *args, **kwargs):
    print("In revert method")


func_task = task.FunctorTask(execute=exec,
                             revert=rev,
                             name="samplefunctor",
                             inject={"x": 2})

flow = linear_flow.Flow('send_message').add(func_task)
e = engines.load(flow)
e.run()
Example #39
            inject={'output': hello}))
    world_chorus.add(PrinterTask("%s@world" % name, inject={'output': world}))

# The composition starts with the conductor and then runs in sequence with
# the chorus running in parallel, but no matter what the 'hello' chorus must
# always run before the 'world' chorus (otherwise the world will fall apart).
song.add(
    PrinterTask("conductor@begin",
                show_name=False,
                inject={'output': "*ding*"}), hi_chorus, world_chorus,
    PrinterTask("conductor@end", show_name=False, inject={'output': "*dong*"}))

# Run in parallel using eventlet green threads...
if eventlet_utils.EVENTLET_AVAILABLE:
    with futures.GreenThreadPoolExecutor() as executor:
        e = engines.load(song, executor=executor, engine='parallel')
        e.run()

# Run in parallel using real threads...
with futures.ThreadPoolExecutor(max_workers=1) as executor:
    e = engines.load(song, executor=executor, engine='parallel')
    e.run()

# Run in parallel using external processes...
with futures.ProcessPoolExecutor(max_workers=1) as executor:
    e = engines.load(song, executor=executor, engine='parallel')
    e.run()

# Run serially (aka, if the workflow could have been run in parallel, it will
# not be when run in this mode)...
e = engines.load(song, engine='serial')
Example #40
    # creating a flow detail (where flow and task state is stored). The
    # combination of these 2 objects' unique ids (uuids) allows the users of
    # taskflow to reassociate the workflows that were potentially running (and
    # which may have partially completed) back with taskflow so that those
    # workflows can be resumed (or reverted) after a process/thread/engine
    # has failed in some way.
    logbook = p_utils.temporary_log_book(backend)
    flow_detail = p_utils.create_flow_detail(flow, logbook, backend)
    print("!! Your tracking id is: '%s+%s'" % (logbook.uuid, flow_detail.uuid))
    print("!! Please submit this on later runs for tracking purposes")
else:
    flow_detail = find_flow_detail(backend, book_id, flow_id)

# Annnnd load and run.
engine = engines.load(flow,
                      flow_detail=flow_detail,
                      backend=backend,
                      engine_conf={
                          'engine': 'parallel',
                          'executor': e_utils.GreenExecutor(10),
                      })
engine.run()

# How to use.
#
# 1. $ python me.py "sqlite:////tmp/cinder.db"
# 2. ctrl-c before this finishes
# 3. Find the tracking id (search for 'Your tracking id is')
# 4. $ python me.py "sqlite:////tmp/cinder.db" "$tracking_id"
# 5. Profit!
Example #41
from taskflow import task
from taskflow import engines
from taskflow.patterns import linear_flow


######### Map Functor Task Example #########
def exec(x):
    return x * 2


map_func = task.MapFunctorTask(functor=exec,
                               requires=['a', 'b', 'c'],
                               provides=('output_map'))
flow = linear_flow.Flow('test_mapfunctor').add(map_func)
e = engines.load(flow, store={'a': 1, 'b': 2, 'c': 3})
e.run()
print(e.storage.fetch('output_map'))

######### Reduce Functor Task Example #########

reduce_func = task.ReduceFunctorTask(functor=lambda a, b: a + b,
                                     requires=['a', 'b', 'c'],
                                     provides=('output_reduce'))
flow = linear_flow.Flow('test_reducefunctor').add(reduce_func)
e = engines.load(flow, store={'a': 1, 'b': 2, 'c': 3})
e.run()

print(e.storage.fetch('output_reduce'))
Example #42
        conn.upgrade()

# Now we can run.
engine_config = {
    'backend': backend_config,
    'engine_conf': 'serial',
    'book': logbook.LogBook("my-test"),
}

# Make a flow that will blow up if the file didn't exist previously; if it
# did exist, assume we won't blow up (and therefore this shows the undo
# and redo that a flow will go through).
flo = make_flow(blowup=blowup)
print_wrapped("Running")

try:
    eng = engines.load(flo, **engine_config)
    eng.run()
    try:
        os.unlink(persist_filename)
    except (OSError, IOError):
        pass
except Exception:
    # NOTE(harlowja): don't exit with non-zero status code, so that we can
    # print the book contents, as well as avoiding exiting also makes the
    # unit tests (which also runs these examples) pass.
    traceback.print_exc(file=sys.stdout)

print_wrapped("Book contents")
print(p_utils.pformat(engine_config['book']))
Example #43
    def test_checks_for_dups(self):
        flo = gf.Flow("test").add(test_utils.DummyTask(name="a"),
                                  test_utils.DummyTask(name="a"))
        e = engines.load(flo)
        self.assertRaisesRegex(exc.Duplicate, '^Atoms with duplicate names',
                               e.compile)
Example #44
import json
from random import randint

import requests

from taskflow import engines
from taskflow.patterns import linear_flow as lf
from taskflow.patterns import unordered_flow as uf
from taskflow import task

class normalize(task.Task):
    def __init__(self, name, show_name=True, inject=None):
        super(normalize, self).__init__(name, inject=inject)
        self._show_name = show_name

    def execute(self, output):
        headers = {'content-type': 'application/json'}
        url = 'http://192.168.99.101:30402'
        data = {"array":output}
        response = requests.post(url, data=json.dumps(data), headers=headers)
        print("%s: %s" % (self.name, response.text))

normalizationPod = lf.Flow('hello')

dataList = [('task1', [randint(0, 9), randint(0, 9), randint(0, 9)]),
            ('task2', [randint(0, 9), randint(0, 9), randint(0, 9)]),
            ('task3', [randint(0, 9), randint(0, 9), randint(0, 9)])]
for (name, data) in dataList:
    print(data)
    normalizationPod.add(normalize("%s@hello" % name, inject={'output': data}))

print("\nExecuting the pod...")
e = engines.load(normalizationPod, engine='serial')
e.run()
Example #45
class PrintTask(task.Task):
    def execute(self):
        print("Running '%s'" % self.name)


backend = backends.fetch({
    'connection': 'memory://',
})
book, flow_detail = pu.temporary_flow_detail(backend=backend)

# Make a little flow and run it...
f = lf.Flow('root')
for alpha in ['a', 'b', 'c']:
    f.add(PrintTask(alpha))

e = engines.load(f, flow_detail=flow_detail, book=book, backend=backend)
e.compile()
e.prepare()

print("----------")
print("Before run")
print("----------")
print(backend.memory.pformat())
print("----------")

e.run()

print("---------")
print("After run")
print("---------")
entries = [
Example #46
        # state on resumption (since they are unique and will vary for each
        # task that is created). A name based off the volume id that is to be
        # created is more easily tied back to the original task so that the
        # volume create can be resumed/revert, and is much easier to use for
        # audit and tracking purposes.
        base_name = reflection.get_callable_name(self)
        super(VolumeCreator,
              self).__init__(name="%s-%s" % (base_name, volume_id))
        self._volume_id = volume_id

    def execute(self):
        print("Making volume %s" % (self._volume_id))
        time.sleep(random.random() * MAX_CREATE_TIME)
        print("Finished making volume %s" % (self._volume_id))


# Assume there is no ordering dependency between volumes.
flow = uf.Flow("volume-maker")
for i in range(0, VOLUME_COUNT):
    flow.add(VolumeCreator(volume_id="vol-%s" % (i)))

# Show how much time the overall engine loading and running takes.
with show_time(name=flow.name.title()):
    eng = engines.load(flow, engine=engine)
    # This context manager automatically adds (and automatically removes) a
    # helpful set of state transition notification printing helper utilities
    # that show you exactly what transitions the engine is going through
    # while running the various volume create tasks.
    with printing.PrintingListener(eng):
        eng.run()
Example #47
# Resources (db handles and similar) of course can't be persisted so we need
# to make sure that we pass this resource fetcher to the tasks constructor so
# that the tasks have access to any needed resources (the resources are
# lazily loaded so that they are only created when they are used).
resources = ResourceFetcher()
flow = lf.Flow("initialize-me")

# 1. First we extract the api request into a usable format.
# 2. Then we go ahead and make a database entry for our request.
flow.add(ExtractInputRequest(resources), MakeDBEntry(resources))

# 3. Then we activate our payment method and finally declare success.
sub_flow = gf.Flow("after-initialize")
sub_flow.add(ActivateDriver(resources), DeclareSuccess())
flow.add(sub_flow)

# Initially populate the storage with the following request object;
# prepopulating this allows the tasks that depend on the 'request' variable
# to start processing (in this case that is the ExtractInputRequest task).
store = {
    'request': misc.AttrDict(user="******", id="1.35"),
}
eng = engines.load(flow, engine_conf='serial', store=store)

# This context manager automatically adds (and automatically removes) a
# helpful set of state transition notification printing helper utilities
# that show you exactly what transitions the engine is going through
# while running the various billing related tasks.
with printing.PrintingListener(eng):
    eng.run()
        print("BitsAndPiecesTask")
        return 'BITs', 'PIECEs'


dog = Dog(requires=("water", "grass"),
          inject={
              'food': 'food',
              'grass': 'grass',
              'water': 'water'
          })
flow = linear_flow.Flow('send_message').add(
    ConnectToServer('name', rebind=('phone_number', 'test_list')))
flow.add(dog)
flow.add(BitsAndPiecesTask(provides=('bits', 'pieces')))
try:
    print("Loading...")
    e = engines.load(flow,
                     store={
                         'phone_number': '8105760129',
                         'test_list': [1, 2, 3, 4]
                     })
    print("Compiling...")
    e.compile()
    print("Preparing...")
    e.prepare()
    print("Running...")
    result = e.run()
except Exception as e:
    print(e)

print(e.storage.fetch('bits'))
Example #49
    def __handling(self, flow_id, flow_name, code_and_para):
        def group_send(content):
            Group('task-{uuid}'.format(uuid=flow_id)).send(
                {'text': json.dumps(content, ensure_ascii=False)})

        def publish_message(message):
            content = {'message_type': 'message', 'message': message}
            group_send(content)

        def get_window_result(window_id, timeout):
            try:
                window_queue = self.window_results.get(window_id)
                if window_queue:
                    window_result = self.window_results[window_id].get(
                        timeout=timeout)
                else:
                    window_result = None
            except:
                window_result = None
            finally:
                if window_id in self.window_results:
                    del self.window_results[window_id]
            return window_result

        def publish_window(window_detail, timeout=60):
            window_id = str(uuid.uuid1())
            self.window_results[window_id] = Queue()
            content = {
                'message_type': 'window',
                'window_detail': window_detail,
                'window_id': window_id,
                'timeout': timeout
            }
            group_send(content)
            result = get_window_result(window_id, timeout)
            return result

        def flow_notification(state, details):
            content = {'message_type': 'flow_state', 'state': state}
            group_send(content)

        def task_notification(state, details):
            try:
                task_name = details['task_name']
                content = {
                    'message_type': 'state',
                    'state': state,
                    'name': task_name
                }
                group_send(content)
                task_instance = TaskInstance.objects.get(uuid=flow_id)
                record = task_instance.get_record(task_name)
                if record:
                    record['message_type'] = 'result'
                    group_send(record)
            except ObjectDoesNotExist:
                LOGGER.exception(
                    'Task {flow_id} does not exist'.format(flow_id=flow_id))
            except BaseException:
                LOGGER.exception('The task monitor raised an exception')

        try:
            flow = linear_flow.Flow(flow_name)
            flow.add(TaskStart(name='start'))
            for item_name, item_code, item_para in code_and_para:
                flow.add(
                    TaskCustom(item_code,
                               item_para,
                               name=item_name,
                               publish_message=publish_message,
                               publish_window=publish_window,
                               get_window_result=get_window_result))
            flow.add(TaskEnd(name='end'))
            store = {'flow_id': flow_id}
            engine = engines.load(flow, store=store)
            engine.notifier.register(notifier.Notifier.ANY, flow_notification)
            engine.atom_notifier.register(notifier.Notifier.ANY,
                                          task_notification)
            engine.compile()
            engine.prepare()
            self.engines[flow_id] = engine
            engine.run()
        except BaseException:
            LOGGER.exception(
                "Test task {flow_id} ended abnormally".format(flow_id=flow_id))
Example #50
                f.seek(0)
                lines = f.readlines()
                # Loop up to 500 times, crawling data starting from page 1;
                # stop when a page comes back with no data.
                for i in range(1, 500):
                    print("start crawl %s, %s" % (name, base_url % i))
                    web_source = requests.get(base_url % i,
                                              headers=REQUEST_HEADER)
                    soup = BeautifulSoup(web_source.content.decode("gbk"),
                                         'lxml')
                    table = soup.select('.J-ajax-table')[0]
                    tbody = table.select('tbody tr')
                    # When tbody is empty, the current page has no more data, so stop the loop.
                    if len(tbody) == 0:
                        break
                    for tr in tbody:
                        fields = tr.select('td')
                        # Drop the first column of each row; it is just a row number and not worth storing.
                        record = [field.text.strip() for field in fields[1:]]
                        # If the record has not been written to the file yet, write it; otherwise skip this row.
                        if record[0] not in crawled_list:
                            f.writelines([','.join(record) + '\n'])
                        # The Tonghuashun (ths) site has anti-crawler measures; crawling too fast will likely get us blocked.
                        time.sleep(1)


if __name__ == '__main__':
    bizdate = '20200214'
    tasks = [MoneyFlowDownload('moneyflow data download')]
    flow = linear_flow.Flow('ths data download').add(*tasks)
    e = engines.load(flow, store={'bizdate': bizdate})
    e.run()
Example #51

def allow(history):
    print(history)
    return False


# Declare our work to be done...
r = gf.Flow("root")
r_a = DummyTask('r-a')
r_b = DummyTask('r-b')
r.add(r_a, r_b)
r.link(r_a, r_b, decider=allow)

# Setup and run the engine layer.
e = engines.load(r)
e.compile()
e.prepare()
e.run()

print("---------")
print("After run")
print("---------")
backend = e.storage.backend
entries = [
    os.path.join(backend.memory.root_path, child)
    for child in backend.memory.ls(backend.memory.root_path)
]
while entries:
    path = entries.pop()
    value = backend.memory[path]
Example #52
    backend_uri = "sqlite:///%s" % (persist_path)
else:
    persist_path = os.path.join(tempfile.gettempdir(), "persisting")
    backend_uri = "file:///%s" % (persist_path)

if os.path.exists(persist_path):
    blowup = False
else:
    blowup = True

with eu.get_backend(backend_uri) as backend:
    # Make a flow that will blow up if the file didn't exist previously, if it
    # did exist, assume we won't blow up (and therefore this shows the undo
    # and redo that a flow will go through).
    book = models.LogBook("my-test")
    flow = make_flow(blowup=blowup)
    eu.print_wrapped("Running")
    try:
        eng = engines.load(flow, engine='serial', backend=backend, book=book)
        eng.run()
        if not blowup:
            eu.rm_path(persist_path)
    except Exception:
        # NOTE(harlowja): don't exit with non-zero status code, so that we can
        # print the book contents, as well as avoiding exiting also makes the
        # unit tests (which also runs these examples) pass.
        traceback.print_exc(file=sys.stdout)

    eu.print_wrapped("Book contents")
    print(book.pformat())
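make_flow is not shown in this excerpt. A minimal sketch, assuming all it needs to do is return a flow whose single task fails (and therefore reverts) when blowup is True, could be:

from taskflow import task
from taskflow.patterns import linear_flow as lf


class MaybeBlowUp(task.Task):
    """Fails on demand so the revert/redo path can be demonstrated."""

    def __init__(self, blowup, **kwargs):
        super(MaybeBlowUp, self).__init__(**kwargs)
        self._blowup = blowup

    def execute(self):
        if self._blowup:
            raise RuntimeError("boom (forcing the flow to revert)")

    def revert(self, **kwargs):
        print("Reverting %s" % self.name)


def make_flow(blowup=False):
    return lf.Flow("persist-demo").add(MaybeBlowUp(blowup, name="maybe-blow-up"))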
Example #53
        # state on resumption (since they are unique and will vary for each
        # task that is created). A name based off the volume id that is to be
        # created is more easily tied back to the original task so that the
        # volume create can be resumed/revert, and is much easier to use for
        # audit and tracking purposes.
        base_name = reflection.get_callable_name(self)
        super(VolumeCreator,
              self).__init__(name="%s-%s" % (base_name, volume_id))
        self._volume_id = volume_id

    def execute(self):
        print("Making volume %s" % (self._volume_id))
        time.sleep(random.random() * MAX_CREATE_TIME)
        print("Finished making volume %s" % (self._volume_id))


# Assume there is no ordering dependency between volumes.
flow = uf.Flow("volume-maker")
for i in range(0, VOLUME_COUNT):
    flow.add(VolumeCreator(volume_id="vol-%s" % (i)))

# Show how much time the overall engine loading and running takes.
with show_time(name=flow.name.title()):
    eng = engines.load(flow, engine_conf=engine_conf)
    # This context manager automatically adds (and automatically removes) a
    # helpful set of state transition notification printing helper utilities
    # that show you exactly what transitions the engine is going through
    # while running the various volume create tasks.
    with printing.PrintingListener(eng):
        eng.run()
Example #54
    # initial 0% and 100% are triggered automatically by the engine when
    # a task is started and finished (so that's why those are not emitted
    # here).
    _PROGRESS_PARTS = [fractions.Fraction("%s/5" % x) for x in range(1, 5)]

    def execute(self):
        for p in self._PROGRESS_PARTS:
            self.update_progress(p)
            time.sleep(self._DELAY)


print("Constructing...")
soup = linear_flow.Flow("alphabet-soup")
for letter in string.ascii_lowercase:
    abc = AlphabetTask(letter)
    abc.notifier.register(task.EVENT_UPDATE_PROGRESS,
                          functools.partial(progress_printer, abc))
    soup.add(abc)
try:
    print("Loading...")
    e = engines.load(soup, engine='parallel', executor='processes')
    print("Compiling...")
    e.compile()
    print("Preparing...")
    e.prepare()
    print("Running...")
    e.run()
    print("Done...")
except exceptions.NotImplementedError as e:
    print(e)
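The progress_printer callback registered above is not part of this excerpt. A minimal hypothetical version, assuming the usual (event_type, details) notification signature with the task bound in as the first argument via functools.partial, could be:

def progress_printer(task, event_type, details):
    # 'details' carries the fraction passed to task.update_progress().
    progress = details.get('progress', 0.0)
    print("Task '%s' reached %0.0f%% completion" % (task.name, progress * 100))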