Example #1
def standalone(context, workflow, domain, workflow_id, execution_timeout, tags,
               decision_tasks_timeout, input, nb_workers, heartbeat):
    """
    This command spawns a decider and an activity worker to execute a workflow
    with a single main process.

    """
    if not workflow_id:
        workflow_id = get_workflow(workflow).name

    task_list = get_task_list(workflow_id)
    logger.info('using task list {}'.format(task_list))
    decider_proc = multiprocessing.Process(target=decider.command.start,
                                           args=(
                                               [workflow],
                                               domain,
                                               task_list,
                                           ))
    decider_proc.start()

    worker_proc = multiprocessing.Process(target=worker.command.start,
                                          args=(
                                              workflow,
                                              domain,
                                              task_list,
                                              nb_workers,
                                              heartbeat,
                                          ))
    worker_proc.start()

    print('starting workflow {}'.format(workflow), file=sys.stderr)
    ex = start_workflow.callback(
        workflow,
        domain,
        workflow_id,
        task_list,
        execution_timeout,
        tags,
        decision_tasks_timeout,
        input,
        local=False,
    )
    while True:
        time.sleep(2)
        ex = helpers.get_workflow_execution(
            domain,
            ex.workflow_id,
            ex.run_id,
        )
        if ex.status == ex.STATUS_CLOSED:
            print('execution {} finished'.format(ex.workflow_id),
                  file=sys.stderr)
            break

    os.kill(worker_proc.pid, signal.SIGTERM)
    worker_proc.join()
    os.kill(decider_proc.pid, signal.SIGTERM)
    decider_proc.join()
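
The example above follows a spawn / poll / terminate pattern: start the decider and worker as child processes, poll the execution until it closes, then send SIGTERM and join. A minimal standard-library sketch of that pattern (the background_loop target below is a placeholder, not a simpleflow API):

import multiprocessing
import os
import signal
import time


def background_loop():
    # Placeholder for decider.command.start / worker.command.start.
    while True:
        time.sleep(1)


if __name__ == '__main__':
    proc = multiprocessing.Process(target=background_loop)
    proc.start()

    # Poll for completion, as standalone() does with get_workflow_execution();
    # here we simply wait a few cycles instead of checking ex.status.
    for _ in range(3):
        time.sleep(2)

    # Stop the child and wait for it, mirroring the SIGTERM + join() above.
    os.kill(proc.pid, signal.SIGTERM)
    proc.join()
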
Example #2
def activity_rerun(domain, workflow_id, run_id, input, scheduled_id,
                   activity_id):
    # handle params
    if not activity_id and not scheduled_id:
        logger.error("Please supply --scheduled-id or --activity-id.")
        sys.exit(1)

    input_override = None
    if input:
        input_override = format.decode(input)

    # find workflow execution
    try:
        wfe = helpers.get_workflow_execution(domain, workflow_id, run_id)
    except (swf.exceptions.DoesNotExistError, IndexError):
        logger.error("Couldn't find execution, exiting.")
        sys.exit(1)
    logger.info("Found execution: workflowId={} runId={}".format(
        wfe.workflow_id, wfe.run_id))

    # now rerun the specified activity
    history = History(wfe.history())
    history.parse()
    task, args, kwargs, meta, params = helpers.find_activity(
        history,
        scheduled_id=scheduled_id,
        activity_id=activity_id,
        input=input_override,
    )
    kwargs["context"].update({
        "workflow_id": wfe.workflow_id,
        "run_id": wfe.run_id,
    })
    logger.debug("Found activity. Last execution:")
    for line in json_dumps(params, pretty=True).split("\n"):
        logger.debug(line)
    if input_override:
        logger.info("NB: input will be overriden with the passed one!")
    logger.info("Will re-run: {}(*{}, **{}) [+meta={}]".format(
        task, args, kwargs, meta))

    # download binaries if needed
    download_binaries(meta.get("binaries", {}))

    # execute the activity task with the correct arguments
    instance = ActivityTask(task, *args, **kwargs)
    result = instance.execute()
    if hasattr(instance, "post_execute"):
        instance.post_execute()
    logger.info("Result (JSON): {}".format(json_dumps(result, compact=False)))
Example #3
def restart_workflow(domain, workflow_id, run_id):
    ex = helpers.get_workflow_execution(domain, workflow_id, run_id)
    history = ex.history()
    ex.terminate()
    new_ex = ex.workflow_type.start_execution(
        ex.workflow_id,
        task_list=ex.task_list,
        execution_timeout=ex.execution_timeout,
        input=history.events[0].input,
        tag_list=ex.tag_list,
        decision_tasks_timeout=ex.decision_tasks_timeout,
    )
    print('{workflow_id} {run_id}'.format(
        workflow_id=new_ex.workflow_id,
        run_id=new_ex.run_id,
    ))
Example #4
def restart_workflow(domain, workflow_id, run_id):
    ex = helpers.get_workflow_execution(domain, workflow_id, run_id)
    history = ex.history()
    ex.terminate(reason='workflow.restart')
    new_ex = ex.workflow_type.start_execution(
        ex.workflow_id,
        task_list=ex.task_list,
        execution_timeout=ex.execution_timeout,
        input=history.events[0].input,
        tag_list=ex.tag_list,
        decision_tasks_timeout=ex.decision_tasks_timeout,
    )
    print('{workflow_id} {run_id}'.format(
        workflow_id=new_ex.workflow_id,
        run_id=new_ex.run_id,
    ))
Example #5
def activity_rerun(domain,
                   workflow_id,
                   run_id,
                   input,
                   scheduled_id,
                   activity_id):
    # handle params
    if not activity_id and not scheduled_id:
        logger.error("Please supply --scheduled-id or --activity-id.")
        sys.exit(1)

    input_override = None
    if input:
        input_override = format.decode(input)

    # find workflow execution
    try:
        wfe = helpers.get_workflow_execution(domain, workflow_id, run_id)
    except (swf.exceptions.DoesNotExistError, IndexError):
        logger.error("Couldn't find execution, exiting.")
        sys.exit(1)
    logger.info("Found execution: workflowId={} runId={}".format(wfe.workflow_id, wfe.run_id))

    # now rerun the specified activity
    history = History(wfe.history())
    history.parse()
    task, args, kwargs, meta, params = helpers.find_activity(
        history, scheduled_id=scheduled_id, activity_id=activity_id, input=input_override,
    )
    logger.debug("Found activity. Last execution:")
    for line in json_dumps(params, pretty=True).split("\n"):
        logger.debug(line)
    if input_override:
        logger.info("NB: input will be overriden with the passed one!")
    logger.info("Will re-run: {}(*{}, **{}) [+meta={}]".format(task, args, kwargs, meta))

    # download binaries if needed
    download_binaries(meta.get("binaries", {}))

    # execute the activity task with the correct arguments
    instance = ActivityTask(task, *args, **kwargs)
    result = instance.execute()
    if hasattr(instance, 'post_execute'):
        instance.post_execute()
    logger.info("Result (JSON): {}".format(json_dumps(result, compact=False)))
Example #6
def activity_rerun(domain, workflow_id, run_id, input, scheduled_id,
                   activity_id):
    # handle params
    if not activity_id and not scheduled_id:
        logger.error("Please supply --scheduled-id or --activity-id.")
        sys.exit(1)

    input_override = None
    if input:
        input_override = json.loads(input)

    # find workflow execution
    try:
        wfe = helpers.get_workflow_execution(domain, workflow_id, run_id)
    except (swf.exceptions.DoesNotExistError, IndexError):
        logger.error("Couldn't find execution, exiting.")
        sys.exit(1)
    logger.info("Found execution: workflowId={} runId={}".format(
        wfe.workflow_id, wfe.run_id))

    # now rerun the specified activity
    history = History(wfe.history())
    history.parse()
    func, args, kwargs, params = helpers.find_activity(
        history,
        scheduled_id=scheduled_id,
        activity_id=activity_id,
        input=input_override,
    )
    logger.debug("Found activity. Last execution:")
    for line in json_dumps(params, pretty=True).split("\n"):
        logger.debug(line)
    if input_override:
        logger.info("NB: input will be overriden with the passed one!")
    logger.info("Will re-run: {}(*{}, **{})".format(func.__name__, args,
                                                    kwargs))

    # finally replay the function with the correct arguments
    result = func(*args, **kwargs)
    logger.info("Result (JSON): {}".format(json_dumps(result)))
Example #7
def standalone(context,
        workflow,
        domain,
        workflow_id,
        execution_timeout,
        tags,
        decision_tasks_timeout,
        input,
        nb_workers,
        heartbeat):
    """
    This command spawns a decider and an activity worker to execute a workflow
    with a single main process.

    """
    if not workflow_id:
        workflow_id = get_workflow(workflow).name

    task_list = get_task_list(workflow_id)
    logger.info('using task list {}'.format(task_list))
    decider_proc = multiprocessing.Process(
        target=decider.command.start,
        args=(
            [workflow],
            domain,
            task_list,
        )
    )
    decider_proc.start()

    worker_proc = multiprocessing.Process(
        target=worker.command.start,
        args=(
            workflow,
            domain,
            task_list,
            nb_workers,
            heartbeat,
        )
    )
    worker_proc.start()

    print('starting workflow {}'.format(workflow), file=sys.stderr)
    ex = start_workflow.callback(
        workflow,
        domain,
        workflow_id,
        task_list,
        execution_timeout,
        tags,
        decision_tasks_timeout,
        input,
        local=False,
    )
    while True:
        time.sleep(2)
        ex = helpers.get_workflow_execution(
            domain,
            ex.workflow_id,
            ex.run_id,
        )
        if ex.status == ex.STATUS_CLOSED:
            print('execution {} finished'.format(ex.workflow_id),
                  file=sys.stderr)
            break

    os.kill(worker_proc.pid, signal.SIGTERM)
    worker_proc.join()
    os.kill(decider_proc.pid, signal.SIGTERM)
    decider_proc.join()
Example #8
def terminate_workflow(domain, workflow_id, run_id):
    ex = helpers.get_workflow_execution(domain, workflow_id, run_id)
    ex.terminate()
Example #9
def standalone(context,
               workflow,
               domain,
               workflow_id,
               execution_timeout,
               tags,
               decision_tasks_timeout,
               input,
               input_file,
               nb_workers,
               nb_deciders,
               heartbeat,
               display_status,
               repair,
               force_activities,
               ):
    """
    This command spawns a decider and an activity worker to execute a workflow
    with a single main process.

    """
    disable_boto_connection_pooling()

    if force_activities and not repair:
        raise ValueError(
            "You should only use --force-activities with --repair."
        )

    if not workflow_id:
        workflow_id = get_workflow(workflow).name

    wf_input = None
    if input or input_file:
        wf_input = get_or_load_input(input_file, input)

    if repair:
        repair_run_id = None
        if " " in repair:
            repair, repair_run_id = repair.split(" ", 1)
        # get the previous execution history, it will serve as "default history"
        # for activities that succeeded in the previous execution
        logger.info(
            'retrieving history of previous execution: domain={} '
            'workflow_id={} run_id={}'.format(domain, repair, repair_run_id)
        )
        previous_history = get_workflow_history(domain, repair, run_id=repair_run_id)
        previous_history.parse()
        # get the previous execution input if none passed
        if not input and not input_file:
            wf_input = previous_history.events[0].input
    else:
        previous_history = None

    task_list = create_unique_task_list(workflow_id)
    logger.info('using task list {}'.format(task_list))
    decider_proc = multiprocessing.Process(
        target=decider.command.start,
        args=(
            [workflow],
            domain,
            task_list,
        ),
        kwargs={
            'nb_processes': nb_deciders,
            'repair_with': previous_history,
            'force_activities': force_activities,
            'is_standalone': True,
        },
    )
    decider_proc.start()

    worker_proc = multiprocessing.Process(
        target=worker.command.start,
        args=(
            domain,
            task_list,
        ),
        kwargs={
            'nb_processes': nb_workers,
            'heartbeat': heartbeat,
        },
    )
    worker_proc.start()

    print('starting workflow {}'.format(workflow), file=sys.stderr)
    ex = start_workflow.callback(
        workflow,
        domain,
        workflow_id,
        task_list,
        execution_timeout,
        tags,
        decision_tasks_timeout,
        json_dumps(wf_input),
        None,
        local=False,
    )
    while True:
        time.sleep(2)
        ex = helpers.get_workflow_execution(
            domain,
            ex.workflow_id,
            ex.run_id,
        )
        if display_status:
            print('status: {}'.format(ex.status), file=sys.stderr)
        if ex.status == ex.STATUS_CLOSED:
            print('execution {} finished'.format(ex.workflow_id), file=sys.stderr)
            break

    os.kill(worker_proc.pid, signal.SIGTERM)
    worker_proc.join()
    os.kill(decider_proc.pid, signal.SIGTERM)
    decider_proc.join()
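
get_or_load_input(input_file, input) above is assumed to read the workflow input either from a file or from the raw --input string. A hedged stdlib-only equivalent could look like this (the name, JSON format, and file-over-string precedence are assumptions, not the library's documented behaviour):

import json


def load_workflow_input(input_file=None, input_str=None):
    # Prefer the file when both are given; this precedence is an assumption.
    if input_file:
        with open(input_file) as f:
            return json.load(f)
    if input_str:
        return json.loads(input_str)
    return None
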
Example #10
    def test_not_standalone(self):
        decider_proc = multiprocessing.Process(
            target=decider.command.start,
            args=(
                [
                    'tests.integration.workflow.ChainTestWorkflow',
                    'tests.integration.workflow.TestRunChild'
                ],
                self.domain,
                None,
            ),
            kwargs={
                'nb_processes': 1,
                'repair_with': None,
                'force_activities': False,
                'is_standalone': False,
            },
        )
        decider_proc.start()

        worker_proc = multiprocessing.Process(
            target=worker.command.start,
            args=(
                self.domain,
                'quickstart',
            ),
            kwargs={
                'nb_processes': 1,
                'heartbeat': 10,
            },
        )
        worker_proc.start()

        ex = start_workflow.callback(
            'tests.integration.workflow.TestRunChild',
            self.domain,
            self.workflow_id,
            task_list=None,
            execution_timeout='10',
            tags=None,
            decision_tasks_timeout='10',
            input='[]',
            input_file=None,
            local=False,
        )
        while True:
            time.sleep(1)
            ex = helpers.get_workflow_execution(
                self.domain,
                ex.workflow_id,
                ex.run_id,
            )
            if ex.status == ex.STATUS_CLOSED:
                break

        expect(ex.status).to.equal(ex.STATUS_CLOSED)
        expect(ex.close_status).to.equal(ex.CLOSE_STATUS_COMPLETED)
        os.kill(worker_proc.pid, signal.SIGTERM)
        worker_proc.join()
        os.kill(decider_proc.pid, signal.SIGTERM)
        decider_proc.join()
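
The while True poll in this test spins forever if the execution never reaches STATUS_CLOSED. A small timeout wrapper (standard library only, not part of simpleflow) keeps such integration tests bounded:

import time


def wait_until(predicate, timeout=120, interval=1):
    # Poll predicate() until it returns True or `timeout` seconds elapse.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise TimeoutError("condition not met within {} seconds".format(timeout))

The polling loop above could then read wait_until(lambda: refresh_execution().status == ex.STATUS_CLOSED), where refresh_execution is a hypothetical helper that re-fetches the execution via helpers.get_workflow_execution.
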
Example #11
def standalone(
    context,
    workflow,
    domain,
    workflow_id,
    execution_timeout,
    tags,
    decision_tasks_timeout,
    input,
    input_file,
    nb_workers,
    nb_deciders,
    heartbeat,
    display_status,
    repair,
    force_activities,
):
    """
    This command spawns a decider and an activity worker to execute a workflow
    with a single main process.

    """
    disable_boto_connection_pooling()

    if force_activities and not repair:
        raise ValueError(
            "You should only use --force-activities with --repair.")

    workflow_class = get_workflow(workflow)
    if not workflow_id:
        workflow_id = workflow_class.name

    wf_input = {}
    if input or input_file:
        wf_input = get_or_load_input(input_file, input)

    if repair:
        repair_run_id = None
        if " " in repair:
            repair, repair_run_id = repair.split(" ", 1)
        # get the previous execution history, it will serve as "default history"
        # for activities that succeeded in the previous execution
        logger.info("retrieving history of previous execution: domain={} "
                    "workflow_id={} run_id={}".format(domain, repair,
                                                      repair_run_id))
        workflow_execution = get_workflow_execution(domain,
                                                    repair,
                                                    run_id=repair_run_id)
        previous_history = History(workflow_execution.history())
        repair_run_id = workflow_execution.run_id
        previous_history.parse()
        # get the previous execution input if none passed
        if not input and not input_file:
            wf_input = previous_history.events[0].input
        if not tags:
            tags = workflow_execution.tag_list
    else:
        previous_history = None
        repair_run_id = None
        if not tags:
            get_tag_list = getattr(workflow_class, "get_tag_list", None)
            if get_tag_list:
                tags = get_tag_list(workflow_class, *wf_input.get("args", ()),
                                    **wf_input.get("kwargs", {}))
            else:
                tags = getattr(workflow_class, "tag_list", None)
            if tags == Workflow.INHERIT_TAG_LIST:
                tags = None

    task_list = create_unique_task_list(workflow_id)
    logger.info("using task list {}".format(task_list))
    decider_proc = multiprocessing.Process(
        target=decider.command.start,
        args=(
            [workflow],
            domain,
            task_list,
        ),
        kwargs={
            "nb_processes": nb_deciders,
            "repair_with": previous_history,
            "force_activities": force_activities,
            "is_standalone": True,
            "repair_workflow_id": repair or None,
            "repair_run_id": repair_run_id,
        },
    )
    decider_proc.start()

    worker_proc = multiprocessing.Process(
        target=worker.command.start,
        args=(
            domain,
            task_list,
        ),
        kwargs={
            "nb_processes": nb_workers,
            "heartbeat": heartbeat,
        },
    )
    worker_proc.start()

    print("starting workflow {}".format(workflow), file=sys.stderr)
    ex = start_workflow.callback(
        workflow,
        domain,
        workflow_id,
        task_list,
        execution_timeout,
        tags,
        decision_tasks_timeout,
        format.input(wf_input),
        None,
        local=False,
    )
    while True:
        time.sleep(2)
        ex = helpers.get_workflow_execution(
            domain,
            ex.workflow_id,
            ex.run_id,
        )
        if display_status:
            print("status: {}".format(ex.status), file=sys.stderr)
        if ex.status == ex.STATUS_CLOSED:
            print("execution {} finished".format(ex.workflow_id),
                  file=sys.stderr)
            break

    os.kill(worker_proc.pid, signal.SIGTERM)
    worker_proc.join()
    os.kill(decider_proc.pid, signal.SIGTERM)
    decider_proc.join()
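
The --repair value handled above is either a workflow id or a 'workflow_id run_id' pair split on the first space. A tiny helper makes that parsing explicit (the helper name is illustrative, not part of simpleflow):

def parse_repair_target(repair):
    # Split a '--repair' value into (workflow_id, run_id); run_id may be None.
    if " " in repair:
        workflow_id, run_id = repair.split(" ", 1)
        return workflow_id, run_id
    return repair, None


assert parse_repair_target("my-workflow") == ("my-workflow", None)
assert parse_repair_target("my-workflow abc123") == ("my-workflow", "abc123")
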
Example #12
def standalone(context,
               workflow,
               domain,
               workflow_id,
               execution_timeout,
               tags,
               decision_tasks_timeout,
               input,
               input_file,
               nb_workers,
               nb_deciders,
               heartbeat,
               display_status,
               repair,
               force_activities,
               ):
    """
    This command spawns a decider and an activity worker to execute a workflow
    with a single main process.

    """
    disable_boto_connection_pooling()

    if force_activities and not repair:
        raise ValueError(
            "You should only use --force-activities with --repair."
        )

    workflow_class = get_workflow(workflow)
    if not workflow_id:
        workflow_id = workflow_class.name

    wf_input = {}
    if input or input_file:
        wf_input = get_or_load_input(input_file, input)

    if repair:
        repair_run_id = None
        if " " in repair:
            repair, repair_run_id = repair.split(" ", 1)
        # get the previous execution history, it will serve as "default history"
        # for activities that succeeded in the previous execution
        logger.info(
            'retrieving history of previous execution: domain={} '
            'workflow_id={} run_id={}'.format(domain, repair, repair_run_id)
        )
        workflow_execution = get_workflow_execution(domain, repair, run_id=repair_run_id)
        previous_history = History(workflow_execution.history())
        repair_run_id = workflow_execution.run_id
        previous_history.parse()
        # get the previous execution input if none passed
        if not input and not input_file:
            wf_input = previous_history.events[0].input
        if not tags:
            tags = workflow_execution.tag_list
    else:
        previous_history = None
        repair_run_id = None
        if not tags:
            get_tag_list = getattr(workflow_class, 'get_tag_list', None)
            if get_tag_list:
                tags = get_tag_list(workflow_class, *wf_input.get('args', ()), **wf_input.get('kwargs', {}))
            else:
                tags = getattr(workflow_class, 'tag_list', None)
            if tags == Workflow.INHERIT_TAG_LIST:
                tags = None

    task_list = create_unique_task_list(workflow_id)
    logger.info('using task list {}'.format(task_list))
    decider_proc = multiprocessing.Process(
        target=decider.command.start,
        args=(
            [workflow],
            domain,
            task_list,
        ),
        kwargs={
            'nb_processes': nb_deciders,
            'repair_with': previous_history,
            'force_activities': force_activities,
            'is_standalone': True,
            'repair_workflow_id': repair or None,
            'repair_run_id': repair_run_id,
        },
    )
    decider_proc.start()

    worker_proc = multiprocessing.Process(
        target=worker.command.start,
        args=(
            domain,
            task_list,
        ),
        kwargs={
            'nb_processes': nb_workers,
            'heartbeat': heartbeat,
        },
    )
    worker_proc.start()

    print('starting workflow {}'.format(workflow), file=sys.stderr)
    ex = start_workflow.callback(
        workflow,
        domain,
        workflow_id,
        task_list,
        execution_timeout,
        tags,
        decision_tasks_timeout,
        format.input(wf_input),
        None,
        local=False,
    )
    while True:
        time.sleep(2)
        ex = helpers.get_workflow_execution(
            domain,
            ex.workflow_id,
            ex.run_id,
        )
        if display_status:
            print('status: {}'.format(ex.status), file=sys.stderr)
        if ex.status == ex.STATUS_CLOSED:
            print('execution {} finished'.format(ex.workflow_id), file=sys.stderr)
            break

    os.kill(worker_proc.pid, signal.SIGTERM)
    worker_proc.join()
    os.kill(decider_proc.pid, signal.SIGTERM)
    decider_proc.join()