Example #1
    def add_child_workflow_start_initiated(self,
                                           workflow,
                                           workflow_id=None,
                                           task_list=None,
                                           input=None,
                                           control=None,
                                           tag_list=None,
                                           task_start_to_close_timeout=0):
        if control is None:
            control = {}

        self.events.append(EventFactory({
            'eventId': self.next_id,
            'eventType': 'StartChildWorkflowExecutionInitiated',
            'eventTimestamp': new_timestamp_string(),
            'startChildWorkflowExecutionInitiatedEventAttributes': {
                'control': json_dumps(control),
                'childPolicy': 'TERMINATE',
                'decisionTaskCompletedEventId': 76,
                'executionStartToCloseTimeout': '432000',
                'input': json_dumps(input) if input is not None else '{}',
                'tagList': tag_list,
                'taskList': {'name': task_list},
                'taskStartToCloseTimeout': task_start_to_close_timeout,
                'workflowId': workflow_id,
                'workflowType': {
                    'name': workflow.name,
                    'version': workflow.version
                }
            }
        }))

        return self
Example #2
    def add_activity_task_scheduled(self, activity, decision_id,
                                    activity_id=None,
                                    input=None,
                                    control=None):
        if control is None:
            control = {}

        self.events.append(EventFactory({
            "eventId": len(self.events) + 1,
            "eventType": "ActivityTaskScheduled",
            "eventTimestamp": new_timestamp_string(),
            "activityTaskScheduledEventAttributes": {
                'control': json_dumps(control),
                "taskList": {
                    "name": activity.task_list,
                },
                "scheduleToCloseTimeout": activity.task_schedule_to_close_timeout,
                "activityType": {
                    "name": activity.name,
                    "version": activity.version,
                },
                "heartbeatTimeout": activity.task_heartbeat_timeout,
                "activityId": (activity_id if activity_id is not None else
                               '{}-{}'.format(
                                   activity.name, hash(activity.name))),
                "scheduleToStartTimeout": activity.task_schedule_to_start_timeout,
                "decisionTaskCompletedEventId": decision_id,
                "input": json_dumps(input if input is not None else {}),
                "startToCloseTimeout": activity.task_start_to_close_timeout,
            }
        }))

        return self
Example #3
def jsonify(values, headers):
    if headers:
        return json_dumps(
            [dict(zip(headers, value)) for value in values]
        )
    else:
        return json_dumps(values)
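
A quick usage sketch of `jsonify` with hypothetical data; the expected output assumes the compact `json_dumps` defaults demonstrated in Example #18:

rows = [(1, "alice"), (2, "bob")]
jsonify(rows, headers=["id", "name"])
# -> '[{"id":1,"name":"alice"},{"id":2,"name":"bob"}]'
jsonify(rows, headers=None)
# -> '[[1,"alice"],[2,"bob"]]'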
Example #4
    def test_json_dumps_futures(self):
        resolved = Future()
        resolved.set_finished("foo")
        self.assertEqual(json_dumps(resolved), '"foo"')

        pending = Future()
        with self.assertRaises(ExecutionBlocked):
            json_dumps(pending)
Example #5
def activity_rerun(domain,
                   workflow_id,
                   run_id,
                   input,
                   scheduled_id,
                   activity_id):
    # handle params
    if not activity_id and not scheduled_id:
        logger.error("Please supply --scheduled-id or --activity-id.")
        sys.exit(1)

    input_override = None
    if input:
        input_override = format.decode(input)

    # find workflow execution
    try:
        wfe = helpers.get_workflow_execution(domain, workflow_id, run_id)
    except (swf.exceptions.DoesNotExistError, IndexError):
        logger.error("Couldn't find execution, exiting.")
        sys.exit(1)
    logger.info("Found execution: workflowId={} runId={}".format(wfe.workflow_id, wfe.run_id))

    # now rerun the specified activity
    history = History(wfe.history())
    history.parse()
    task, args, kwargs, meta, params = helpers.find_activity(
        history, scheduled_id=scheduled_id, activity_id=activity_id, input=input_override,
    )
    logger.debug("Found activity. Last execution:")
    for line in json_dumps(params, pretty=True).split("\n"):
        logger.debug(line)
    if input_override:
        logger.info("NB: input will be overriden with the passed one!")
    logger.info("Will re-run: {}(*{}, **{}) [+meta={}]".format(task, args, kwargs, meta))

    # download binaries if needed
    download_binaries(meta.get("binaries", {}))

    # execute the activity task with the correct arguments
    instance = ActivityTask(task, *args, **kwargs)
    result = instance.execute()
    if hasattr(instance, 'post_execute'):
        instance.post_execute()
    logger.info("Result (JSON): {}".format(json_dumps(result, compact=False)))
Example #6
    def process(self, poller, token, task):
        """

        :param poller:
        :type poller: ActivityPoller
        :param token:
        :type token: str
        :param task:
        :type task: swf.models.ActivityTask
        """
        logger.debug('ActivityWorker.process() pid={}'.format(os.getpid()))
        try:
            activity = self.dispatch(task)
            input = format.decode(task.input)
            args = input.get('args', ())
            kwargs = input.get('kwargs', {})
            context = sanitize_activity_context(task.context)
            context['domain_name'] = poller.domain.name
            if input.get('meta', {}).get('binaries'):
                download_binaries(input['meta']['binaries'])
            result = ActivityTask(activity, *args, context=context, **kwargs).execute()
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            logger.exception("process error: {}".format(str(exc_value)))
            if isinstance(exc_value, ExecutionError) and len(exc_value.args):
                details = exc_value.args[0]
                reason = format_exc(exc_value)  # FIXME json.loads and rebuild?
            else:
                tb = traceback.format_tb(exc_traceback)
                reason = format_exc(exc_value)
                details = json_dumps(
                    {
                        'error': exc_type.__name__,
                        'message': str(exc_value),
                        'traceback': tb,
                    },
                    default=repr
                )
            return poller.fail_with_retry(
                token,
                task,
                reason=reason,
                details=details
            )

        try:
            logger.info('completing activity')
            poller.complete_with_retry(token, result)
        except Exception as err:
            logger.exception("complete error")
            reason = 'cannot complete task {}: {} {}'.format(
                task.activity_id,
                err.__class__.__name__,
                err,
            )
            poller.fail_with_retry(token, task, reason)
Example #7
 def identity(self):
     if self.process_mode == "kubernetes":
         self.job_name = "{}--{}".format(to_k8s_identifier(self.task_list), str(uuid.uuid4()))
         return json_dumps({
             "cluster": os.environ["K8S_CLUSTER"],
             "namespace": os.environ["K8S_NAMESPACE"],
             "job": self.job_name,
         })
     else:
         return super(ActivityPoller, self).identity
Example #8
    def test_proxy(self):
        from lazy_object_proxy import Proxy

        def unwrap():
            return "foo"

        data = {
            "args": [Proxy(unwrap)]
        }
        expected = '{"args":["foo"]}'
        actual = json_dumps(data)
        self.assertEqual(expected, actual)
Example #9
    def add_marker(self, name, details=None):
        self.events.append(EventFactory({
            'eventId': self.next_id,
            'eventTimestamp': new_timestamp_string(),
            'eventType': 'MarkerRecorded',
            'markerRecordedEventAttributes': {
                'details': json_dumps(details) if details is not None else '{}',
                'markerName': name,
            }
        }))

        return self
Example #10
 def test_set(self):
     data = [
         {1, 2, 3},
         frozenset([-1, -2, -3]),
     ]
     expected = [
         [1, 2, 3],
         [-1, -2, -3],
     ]
     actual = json_dumps(data)
     actual = json.loads(actual)
     self.assertEqual(sorted(expected[0]), sorted(actual[0]))
     self.assertEqual(sorted(expected[1]), sorted(actual[1]))
Example #11
    def add_signal(self, name, input=None, external_event_id=0):
        self.events.append(EventFactory({
            'eventId': self.next_id,
            'eventTimestamp': new_timestamp_string(),
            'eventType': 'WorkflowExecutionSignaled',
            'workflowExecutionSignaledEventAttributes': {
                'externalInitiatedEventId': external_event_id,
                'input': json_dumps(input) if input is not None else '{}',
                'signalName': name,
            }
        }))

        return self
Example #12
    def add_activity_task_completed(self, scheduled, started,
                                    result=None):
        self.events.append(EventFactory({
            "eventId": len(self.events) + 1,
            "eventType": "ActivityTaskCompleted",
            "eventTimestamp": new_timestamp_string(),
            "activityTaskCompletedEventAttributes": {
                "startedEventId": started,
                "scheduledEventId": scheduled,
                "result": json_dumps(result) if result is not None else None,
            }
        }))

        return self
Example #13
 def test_json_non_compact(self):
     cases = [
         [None,       'null'],
         [1,          '1'],
         ["a",        '"a"'],
         [[1, 2],     '[1, 2]'],
         [(1, 2),     '[1, 2]'],
         [{'a': 'b'}, '{"a": "b"}'],
     ]
     for case in cases:
         self.assertEqual(
             json_dumps(case[0], compact=False),
             case[1],
         )
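
Together with Example #18 below, this pins down the `compact` switch. A minimal side-by-side, reusing the dict case from the tests:

data = {'a': 'b'}
json_dumps(data)                 # '{"a":"b"}'   (compact separators, the default)
json_dumps(data, compact=False)  # '{"a": "b"}'  (standard json.dumps spacing)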
Example #14
    def add_activity_task_completed(self, scheduled, started, result=None):
        self.events.append(
            EventFactory({
                "eventId": len(self.events) + 1,
                "eventType": "ActivityTaskCompleted",
                "eventTimestamp": new_timestamp_string(),
                "activityTaskCompletedEventAttributes": {
                    "startedEventId": started,
                    "scheduledEventId": scheduled,
                    "result":
                    json_dumps(result) if result is not None else None,
                }
            }))

        return self
Example #15
    def add_child_workflow_start_initiated(self,
                                           workflow,
                                           workflow_id=None,
                                           task_list=None,
                                           input=None,
                                           control=None,
                                           tag_list=None,
                                           task_start_to_close_timeout=0):
        if control is None:
            control = {}

        self.events.append(
            EventFactory({
                'eventId': self.next_id,
                'eventType': 'StartChildWorkflowExecutionInitiated',
                'eventTimestamp': new_timestamp_string(),
                'startChildWorkflowExecutionInitiatedEventAttributes': {
                    'control': json_dumps(control),
                    'childPolicy': 'TERMINATE',
                    'decisionTaskCompletedEventId': 76,
                    'executionStartToCloseTimeout': '432000',
                    'input': json_dumps(input) if input is not None else '{}',
                    'tagList': tag_list,
                    'taskList': {
                        'name': task_list
                    },
                    'taskStartToCloseTimeout': task_start_to_close_timeout,
                    'workflowId': workflow_id,
                    'workflowType': {
                        'name': workflow.name,
                        'version': workflow.version
                    }
                }
            }))

        return self
Example #16
 def add_timer_started(self, timer_id, timeout, control=None, decision_id=0):
     d = {
         "decisionTaskCompletedEventId": decision_id,
         'startToFireTimeout': str(timeout),
         'timerId': timer_id,
     }
     if control is not None:
         d['control'] = json_dumps(control)
     self.events.append(EventFactory({
         'eventId': self.next_id,
         'eventTimestamp': new_timestamp_string(),
         'eventType': 'TimerStarted',
         'timerStartedEventAttributes': d
     }))
     return self
Example #17
def test_workflow_with_two_tasks():
    workflow = ATestDefinition
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # *double* requires the result of *increment*, held by the *a* future.
    # Hence the executor schedules *increment*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 1
        },
        result=2).add_decision_task_scheduled().add_decision_task_started())

    # Now ``a.result`` contains the result of *increment*, which is finished.
    # The line ``return b.result`` requires the computation of *double* with
    # ``a.result``, so the executor should schedule *double*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        double,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.double-1',
        input={
            'args': 2
        },
        result=4).add_decision_task_scheduled().add_decision_task_started())

    # *double* has completed and ``b.result`` is now available. The executor
    # should complete the workflow and set its result to ``b.result``.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(4))

    assert decisions[0] == workflow_completed
Example #18
 def test_json_dumps_basics(self):
     d = datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
     cases = [
         [None,         'null'],
         [1,            '1'],
         ["a",          '"a"'],
         [[1, 2],       '[1,2]'],
         [(1, 2),       '[1,2]'],
         [{'a': 'b'},   '{"a":"b"}'],
         [{'start': d}, '{"start":"1970-01-01T00:00:00Z"}'],
     ]
     for case in cases:
         self.assertEqual(
             json_dumps(case[0]),
             case[1],
         )
Example #19
def test_workflow_with_two_tasks_not_completed():
    """
    This test checks how the executor behaves when a task is still running.
    """
    workflow = ATestDefinitionWithInput
    executor = Executor(DOMAIN, workflow)

    arg = 4
    result = 5
    history = builder.History(workflow, input={'args': (arg, )})

    # The executor should schedule *increment*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task in state ``started`` to the history.
    decision_id = history.last_id
    scheduled_id = decision_id + 1
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='started',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 1
        },
        result=5).add_decision_task_scheduled().add_decision_task_started())

    # The executor cannot schedule any other task, so it returns an empty
    # decisions list.
    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 0

    # Let's now set the task as ``completed`` in the history.
    decision_id = history.last_id
    (history.add_activity_task_completed(scheduled=scheduled_id,
                                         started=scheduled_id + 1,
                                         result=result).
     add_decision_task_scheduled().add_decision_task_started())

    # As there is a single task and it is now finished, the executor should
    # complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(result))

    assert decisions[0] == workflow_completed
Example #20
def test_workflow_reuse_same_future():
    workflow = ATestDefinitionSameFuture
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # *double* depends on *increment*, so the executor should only schedule
    # *increment* at first.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        input={
            'args': 1
        },
        activity_id='activity-tests.data.activities.increment-1',
        result=2).add_decision_task_scheduled().add_decision_task_started())

    # *increment* is finished, the executor should schedule *double*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        double,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.double-1',
        input={
            'args': 2
        },
        result=4).add_decision_task_scheduled().add_decision_task_started())

    # The executor should now complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(4))

    assert decisions[0] == workflow_completed
Example #21
    def continue_as_new(self,
                        child_policy=None,
                        execution_timeout=None,
                        task_timeout=None,
                        input=None,
                        tag_list=None,
                        task_list=None,
                        workflow_type_version=None):
        """Coninue as new workflow execution decision builder
        :param  child_policy: specifies the policy to use for the
                              child workflow executions of the new execution
        :type   child_policy: CHILD_POLICIES.{TERMINATE | REQUEST_CANCEL | ABANDON}

        :param  execution_timeout: specifies the total duration for this workflow execution
        :type   execution_timeout: str

        :param  input: The input provided to the new workflow execution
        :type   input: dict

        :param  tag_list: list of tags to associate with the new workflow execution
        :type   tag_list: list

        :param  task_list: task list name
        :type   task_list: str

        :param  task_timeout: maximum duration of decision tasks for the new workflow execution
        :type   task_timeout: str

        :param  workflow_type_version: workflow type version the execution should belong to
        :type   workflow_type_version: str
        """
        if input is not None:
            input = json_dumps(input)

        self.update_attributes({
            'childPolicy': child_policy,
            'executionStartToCloseTimeout': execution_timeout,
            'taskStartToCloseTimeout': task_timeout,
            'input': input,
            'tagList': tag_list,
            'taskList': task_list,
            'workflowTypeVersion': workflow_type_version,
        })
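
A hypothetical call site for the builder above, assuming it is available on `swf.models.decision.WorkflowExecutionDecision` like the `complete()` call in Example #17; all values are illustrative:

decision = swf.models.decision.WorkflowExecutionDecision()
decision.continue_as_new(
    execution_timeout='432000',
    task_timeout='300',
    input={'args': [1], 'kwargs': {}},  # serialized via json_dumps by the method
    task_list='my-task-list',
    workflow_type_version='1.0',
)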
Example #22
def test_workflow_retry_activity():
    workflow = ATestDefinitionRetryActivity
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # There is a single task, hence the executor should schedule it first.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``failed`` state.
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        decision_id=decision_id,
        last_state='failed',
        activity_id='activity-tests.data.activities.increment_retry-1').
     add_decision_task_scheduled().add_decision_task_started())

    # As the retry value is one, the executor should retry, i.e. schedule the
    # task again.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``completed`` state.
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment_retry-1',
        input={
            'args': 7
        },
        result=8).add_decision_task_scheduled().add_decision_task_started())

    # Now the task is finished and the executor should complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(8))

    assert decisions[0] == workflow_completed
Example #23
def test_workflow_retry_activity_failed_again():
    workflow = ATestDefinitionRetryActivity
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # There is a single task, hence the executor should schedule it first.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``failed`` state.
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        decision_id=decision_id,
        last_state='failed',
        activity_id='activity-tests.data.activities.increment_retry-1').
     add_decision_task_scheduled().add_decision_task_started())

    # As the retry value is one, the executor should retry, i.e. schedule the
    # task again.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``failed`` state again.
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        decision_id=decision_id,
        last_state='failed',
        activity_id='activity-tests.data.activities.increment_retry-1').
     add_decision_task_scheduled().add_decision_task_started())

    # There are no more retries. The executor should set `Future.exception` and
    # complete the workflow as there is no further task.
    decisions, _ = executor.replay(Response(history=history))

    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    # ``a.result`` is ``None`` because it was not set.
    workflow_completed.complete(result=json_dumps(None))

    assert decisions[0] == workflow_completed
Example #24
 def add_timer_started(self,
                       timer_id,
                       timeout,
                       control=None,
                       decision_id=0):
     d = {
         "decisionTaskCompletedEventId": decision_id,
         "startToFireTimeout": str(timeout),
         "timerId": timer_id,
     }
     if control is not None:
         d["control"] = json_dumps(control)
     self.events.append(
         EventFactory({
             "eventId": self.next_id,
             "eventTimestamp": new_timestamp_string(),
             "eventType": "TimerStarted",
             "timerStartedEventAttributes": d,
         }))
     return self
Example #25
 def add_timer_started(self,
                       timer_id,
                       timeout,
                       control=None,
                       decision_id=0):
     d = {
         "decisionTaskCompletedEventId": decision_id,
         'startToFireTimeout': str(timeout),
         'timerId': timer_id,
     }
     if control is not None:
         d['control'] = json_dumps(control)
     self.events.append(
         EventFactory({
             'eventId': self.next_id,
             'eventTimestamp': new_timestamp_string(),
             'eventType': 'TimerStarted',
             'timerStartedEventAttributes': d
         }))
     return self
Example #26
    def _make_task_id(self, a_task, *args, **kwargs):
        """
        Assign a new ID to *a_task*.

        :type a_task: ActivityTask | WorkflowTask
        :returns:
            String with at most 256 characters.
        :rtype: str

        """
        if isinstance(a_task, ActivityTask) and hasattr(
                a_task.activity.callable, 'get_task_id'):
            suffix = a_task.activity.callable.get_task_id(
                self.workflow, *args, **kwargs)
        elif not a_task.idempotent:
            # If idempotency is False or unknown, let's generate a task id by
            # appending an incrementing id to the a_task name.
            # (default strategy, backwards compatible with previous versions)
            suffix = self._tasks.add(a_task)
        else:
            # If a_task is idempotent, we can do better and hash arguments.
            # It makes the workflow resistant to retries or variations on the
            # same task name (see #11).
            arguments = json_dumps(
                {"args": args, "kwargs": kwargs},
                sort_keys=True,
            )
            suffix = hashlib.md5(arguments.encode('utf-8')).hexdigest()

        if isinstance(a_task, (WorkflowTask, )):
            # Some task types must have globally unique names.
            suffix = '{}--{}--{}'.format(self._workflow_id,
                                         hex_hash(self._run_id), suffix)

        task_id = '{name}-{suffix}'.format(name=a_task.name, suffix=suffix)
        if len(task_id) > 256:  # Better safe than sorry...
            task_id = task_id[0:223] + "-" + hashlib.md5(
                task_id.encode('utf-8')).hexdigest()

        return task_id
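
The idempotent branch is the interesting part: hashing a canonical JSON encoding of the arguments keeps the task id stable across retries. A standalone sketch of that idea, using the stdlib `json` in place of simpleflow's `json_dumps`:

import hashlib
import json

def idempotent_suffix(args, kwargs):
    # sort_keys=True yields a canonical encoding, so the same arguments
    # always hash to the same suffix regardless of dict ordering
    arguments = json.dumps({"args": args, "kwargs": kwargs}, sort_keys=True)
    return hashlib.md5(arguments.encode('utf-8')).hexdigest()

assert (idempotent_suffix([1], {"a": 1, "b": 2})
        == idempotent_suffix([1], {"b": 2, "a": 1}))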
Example #27
    def add_decision_task_completed(self, scheduled=None, started=None,
                                    execution_context=None):
        if scheduled is None:
            scheduled = self.last_id - 1

        if started is None:
            started = self.last_id

        self.events.append(EventFactory({
            "eventId": len(self.events) + 1,
            "eventType": "DecisionTaskCompleted",
            "eventTimestamp": new_timestamp_string(),
            "decisionTaskCompletedEventAttributes": {
                "startedEventId": started,
                "scheduledEventId": scheduled,
                "executionContext": (json_dumps(execution_context) if
                                     execution_context is not None else None),
            }
        }))

        return self
Example #28
def swf_identity():
    # basic identity
    identity = {
        'user': getpass.getuser(),  # system's user
        'hostname': socket.gethostname(),  # main hostname
        'pid': os.getpid(),  # current pid
    }

    # adapt with extra keys from env
    if "SIMPLEFLOW_IDENTITY" in os.environ:
        try:
            extra_keys = json.loads(os.environ["SIMPLEFLOW_IDENTITY"])
        except Exception:
            extra_keys = {}
        for key, value in iteritems(extra_keys):
            identity[key] = value

    # remove null values
    identity = {k: v for k, v in iteritems(identity) if v is not None}

    # serialize the result
    return json_dumps(identity)
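
The SIMPLEFLOW_IDENTITY hook sketched with hypothetical values; note that a JSON null in the override removes the corresponding key, since null values are stripped afterwards:

import json
import os

os.environ["SIMPLEFLOW_IDENTITY"] = json.dumps({
    "cluster": "prod-1",  # extra key, merged into the identity
    "pid": None,          # null value, drops "pid" from the identity
})
swf_identity()
# -> something like '{"user":"jdoe","hostname":"worker-3","cluster":"prod-1"}'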
Example #29
    def __init__(self, workflow, input=None, tag_list=None):
        """
        Bootstrap a history with the first events added by SWF.

        :param workflow: workflow to simulate
        :type  workflow: declarative.Workflow
        :param input: JSON serializable dict
        :type  input: dict
        :param tag_list: string of tags (beware: a string, not a list)
        :type  tag_list: str

        """
        self._workflow = workflow
        self.events = [
            EventFactory({
                "eventId": 1,
                "eventType": "WorkflowExecutionStarted",
                "eventTimestamp": new_timestamp_string(),
                "workflowExecutionStartedEventAttributes": {
                    "taskList": {
                        "name": workflow.task_list,
                    },
                    "parentInitiatedEventId": 0,
                    "taskStartToCloseTimeout":
                        workflow.decision_tasks_timeout,
                    "childPolicy": "TERMINATE",
                    "executionStartToCloseTimeout":
                        workflow.execution_timeout,
                    "input": json_dumps(input if input else {}),
                    "workflowType": {
                        "name": workflow.name,
                        "version": workflow.version
                    },
                    "tagList": tag_list or getattr(workflow, 'tag_list', None)
                }
            })
        ]
        self.add_decision_task_scheduled()
        self.add_decision_task_started(len(self.events))
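
A usage sketch of this builder, mirroring the chaining style of the tests (e.g., Example #19); `MyWorkflowDefinition` is a hypothetical workflow class:

history = builder.History(MyWorkflowDefinition, input={'args': (4,)})
(history
 .add_activity_task_completed(scheduled=2, started=3, result=5)
 .add_decision_task_scheduled()
 .add_decision_task_started())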
Example #30
def swf_identity():
    # basic identity
    identity = {
        'user': getpass.getuser(),          # system's user
        'hostname': socket.gethostname(),   # main hostname
        'pid': os.getpid(),                 # current pid
    }

    # adapt with extra keys from env
    if "SIMPLEFLOW_IDENTITY" in os.environ:
        try:
            extra_keys = json.loads(os.environ["SIMPLEFLOW_IDENTITY"])
        except Exception:
            extra_keys = {}
        for key, value in iteritems(extra_keys):
            identity[key] = value

    # remove null values
    identity = {k: v for k, v in iteritems(identity) if v is not None}

    # serialize the result
    return json_dumps(identity)
Example #31
    def _make_task_id(self, a_task, *args, **kwargs):
        """
        Assign a new ID to *a_task*.

        :type a_task: ActivityTask | WorkflowTask
        :returns:
            String with at most 256 characters.

        """
        if not a_task.idempotent:
            # If idempotency is False or unknown, let's generate a task id by
            # appending an incrementing id to the a_task name.
            # (default strategy, backwards compatible with previous versions)
            suffix = self._tasks.add(a_task)
        else:
            # If a_task is idempotent, we can do better and hash arguments.
            # It makes the workflow resistant to retries or variations on the
            # same task name (see #11).
            arguments = json_dumps({"args": args, "kwargs": kwargs})
            suffix = hashlib.md5(arguments).hexdigest()

        task_id = '{name}-{idx}'.format(name=a_task.name, idx=suffix)
        return task_id
Example #32
    def signal(self, signal_name, input=None, *args, **kwargs):
        """Records a signal event in the workflow execution history and
        creates a decision task.

        The signal event is recorded with the specified user defined
        ``signal_name`` and ``input`` (if provided).

        :param  signal_name: The name of the signal. This name must be
                             meaningful to the target workflow.
        :type   signal_name: str

        :param  input: Data to attach to the WorkflowExecutionSignaled
                       event in the target workflow execution’s history.
        :type   input: dict
        """
        if input is None:
            input = {}
        self.connection.signal_workflow_execution(
            self.domain.name,
            signal_name,
            self.workflow_id,
            input=json_dumps(input),
            run_id=self.run_id)
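
A hypothetical call site, assuming this method lives on the workflow execution model returned by `helpers.get_workflow_execution()` in Example #5:

wfe = helpers.get_workflow_execution('my-domain', 'my-workflow-id', run_id)
wfe.signal('data_ready', input={'batch_id': 42})  # input is json_dumps'd internally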
Example #33
    def __init__(self, workflow, input=None, tag_list=None):
        """
        Bootstrap a history with the first events added by SWF.

        :param workflow: workflow to simulate
        :type  workflow: declarative.Workflow
        :param input: JSON serializable dict
        :type  input: dict
        :param tag_list: string of tags (beware: a string, not a list)
        :type  tag_list: str

        """
        self._workflow = workflow
        self.events = [
            EventFactory({
                "eventId": 1,
                "eventType": "WorkflowExecutionStarted",
                "eventTimestamp": new_timestamp_string(),
                "workflowExecutionStartedEventAttributes": {
                    "taskList": {
                        "name": workflow.task_list,
                    },
                    "parentInitiatedEventId": 0,
                    "taskStartToCloseTimeout": workflow.decision_tasks_timeout,
                    "childPolicy": "TERMINATE",
                    "executionStartToCloseTimeout": workflow.execution_timeout,
                    "input": json_dumps(input if input else {}),
                    "workflowType": {
                        "name": workflow.name,
                        "version": workflow.version
                    },
                    "tagList": tag_list or getattr(workflow, 'tag_list', None)
                }
            })
        ]
        self.add_decision_task_scheduled()
        self.add_decision_task_started(len(self.events))
Example #34
def swf_identity():
    # basic identity
    pid = os.getpid()
    identity = {
        "user": getpass.getuser(),  # system's user
        "hostname": socket.gethostname(),  # main hostname
        "pid": pid,  # current pid
        "exe": psutil.Process(pid).exe(),  # executable path
    }

    # adapt with extra keys from env
    if "SIMPLEFLOW_IDENTITY" in os.environ:
        try:
            extra_keys = json.loads(os.environ["SIMPLEFLOW_IDENTITY"])
        except Exception:
            extra_keys = {}
        for key, value in iteritems(extra_keys):
            identity[key] = value

    # remove null values
    identity = {k: v for k, v in iteritems(identity) if v is not None}

    # serialize the result
    return json_dumps(identity)
Example #35
    def process(self, poller, token, task):
        """

        :param poller:
        :type poller: ActivityPoller
        :param token:
        :type token: str
        :param task:
        :type task: swf.models.ActivityTask
        """
        logger.debug('ActivityWorker.process() pid={}'.format(os.getpid()))
        activity = self.dispatch(task)
        input = json.loads(task.input)
        args = input.get('args', ())
        kwargs = input.get('kwargs', {})
        context = sanitize_activity_context(task.context)
        try:
            result = ActivityTask(activity, *args, context=context,
                                  **kwargs).execute()
        except Exception as err:
            logger.exception("process error: {}".format(str(err)))
            tb = traceback.format_exc()
            return poller.fail_with_retry(token,
                                          task,
                                          reason=str(err),
                                          details=tb)

        try:
            poller.complete_with_retry(token, json_dumps(result))
        except Exception as err:
            logger.exception("complete error")
            reason = 'cannot complete task {}: {}'.format(
                task.activity_id,
                err,
            )
            poller.fail_with_retry(token, task, reason)
Example #36
    def resume(self, a_task, *args, **kwargs):
        """Resume the execution of a task.
        Called by `submit`.

        If the task was scheduled, returns a future that wraps its state,
        otherwise schedules it.
        If in repair mode, we may fake the task to repair from the previous history.

        :param a_task:
        :type a_task: ActivityTask | WorkflowTask | SignalTask
        :param args:
        :type args: list
        :param kwargs:
        :type kwargs: dict
        :rtype: futures.Future
        :raise: exceptions.ExecutionBlocked if open activities limit reached
        """

        if not a_task.id:  # Can be already set (WorkflowTask)
            a_task.id = self._make_task_id(a_task, *args, **kwargs)
        event = self.find_event(a_task, self._history)
        logger.debug('executor: resume {}, event={}'.format(a_task, event))
        future = None

        # in repair mode, check if we absolutely want to re-execute this task
        force_execution = (self.force_activities
                           and self.force_activities.search(a_task.id))

        # try to fill in the blanks with the workflow we're trying to repair if any
        # TODO: maybe only do that for idempotent tasks?? (not enough information to decide?)
        if not event and self.repair_with and not force_execution:
            # try to find a former event matching this task
            former_event = self.find_event(a_task, self.repair_with)
            # ... but only keep the event if the task was successful
            if former_event and former_event['state'] == 'completed':
                logger.info('faking task completed successfully in previous '
                            'workflow: {}'.format(former_event['id']))
                json_hash = hashlib.md5(
                    json_dumps(former_event).encode('utf-8')).hexdigest()
                fake_task_list = "FAKE-" + json_hash

                # schedule task on a fake task list
                self.schedule_task(a_task, task_list=fake_task_list)
                future = futures.Future()

                # start a dedicated process to handle the fake activity
                run_fake_task_worker(self.domain.name, fake_task_list,
                                     former_event)

        # back to normal execution flow
        if event:
            ttf = self.EVENT_TYPE_TO_FUTURE.get(event['type'])
            if ttf:
                future = ttf(self, a_task, event)
            if event['type'] == 'activity':
                if future and future.state in (futures.PENDING,
                                               futures.RUNNING):
                    self._open_activity_count += 1

        if not future:
            self.schedule_task(a_task, task_list=self.task_list)
            future = futures.Future()  # return a pending future.

        if self._open_activity_count == constants.MAX_OPEN_ACTIVITY_COUNT:
            logger.warning('limit of {} open activities reached'.format(
                constants.MAX_OPEN_ACTIVITY_COUNT))
            raise exceptions.ExecutionBlocked

        return future
Example #37
    def submit(self, func, *args, **kwargs):
        logger.info('executing task {}(args={}, kwargs={})'.format(
            func, args, kwargs))

        future = futures.Future()

        context = self.get_run_context()
        context["activity_id"] = str(self.nb_activities)
        self.nb_activities += 1

        # Ensure signals ordering
        if isinstance(func, SignalTask):
            self.signals_sent.add(func.name)
        elif isinstance(func, WaitForSignal):
            signal_name = func.signal_name
            if signal_name not in self.signals_sent:
                raise NotImplementedError(
                    'wait_signal({}) before signal was sent: unsupported by the local executor'
                    .format(signal_name))
        elif isinstance(func, MarkerTask):
            self._markers.setdefault(func.name, []).append(
                Marker(func.name, func.details))

        if isinstance(func, Submittable):
            task = func  # *args, **kwargs already resolved.
            task.context = context
            func = getattr(task, 'activity', None)
        elif isinstance(func, Activity):
            task = ActivityTask(func, context=context, *args, **kwargs)
        elif issubclass(func, Workflow):
            task = WorkflowTask(self, func, *args, **kwargs)
        else:
            raise TypeError('invalid type {} for {}'.format(type(func), func))

        if isinstance(task, WorkflowTask):
            self.on_new_workflow(task)

        try:
            future._result = task.execute()
            if hasattr(task, 'post_execute'):
                task.post_execute()
            state = 'completed'
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            future._exception = exc_value
            logger.exception('rescuing exception: {}'.format(exc_value))
            if ((isinstance(func, Activity) or issubclass_(func, Workflow))
                    and getattr(func, 'raises_on_failure', None)):
                tb = traceback.format_tb(exc_traceback)
                message = format_exc(exc_value)
                details = json_dumps(
                    {
                        'error': exc_type.__name__,
                        'message': str(exc_value),
                        'traceback': tb,
                    },
                    default=repr)
                raise exceptions.TaskFailed(
                    func.name,
                    message,
                    details,
                )
            state = 'failed'
        finally:
            if isinstance(task, WorkflowTask):
                self.on_completed_workflow()
            future._state = futures.FINISHED

        if func:
            self._history.add_activity_task(func,
                                            decision_id=None,
                                            last_state=state,
                                            activity_id=context["activity_id"],
                                            input={
                                                'args': args,
                                                'kwargs': kwargs
                                            },
                                            result=future.result)
        return future
Example #38
 def test_json_dumps_pretty(self):
     self.assertEqual(
         json_dumps({"z": 1, "abc": "def"}, pretty=True),
         '{\n    "abc": "def",\n    "z": 1\n}',
     )
Example #39
def main():
    """
    When executed as a script, this module expects the name of a callable as
    its first argument and the arguments of the callable encoded in a JSON
    string as its second argument. It then executes the callable with the
    arguments after decoding them into Python objects. It finally encodes the
    value returned by the callable into a JSON string and prints it on stdout.

    The arguments of the callable are stored in a dict with the following
    format: ::

        {'args': [...],
         'kwargs': {
             ...,
         }}

    Synopsis
    --------

    ::

        usage: execute.py [-h] funcname funcargs

        positional arguments:
          funcname    name of the callable to execute
          funcargs    callable arguments in JSON

        optional arguments:
          -h, --help  show this help message and exit

    Examples
    --------

    ::

        $ python -m simpleflow.execute "os.path.exists" '{"args": ["/tmp"]}'
        true

    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'funcname',
        help='name of the callable to execute',
    )
    parser.add_argument(
        'funcargs',
        help='callable arguments in JSON',
    )
    parser.add_argument(
        '--context',
        help='Activity Context',
    )
    parser.add_argument(
        '--logger-name',
        help='logger name',
    )
    parser.add_argument(
        '--result-fd',
        type=int,
        default=1,
        metavar='N',
        help='result file descriptor',
    )
    parser.add_argument(
        '--error-fd',
        type=int,
        default=2,
        metavar='N',
        help='error file descriptor',
    )
    parser.add_argument(
        '--arguments-json-fd',
        type=int,
        default=None,
        metavar='N',
        help='JSON input file descriptor',
    )
    parser.add_argument(
        '--kill-children',
        action='store_true',
        help='kill child processes on exit',
    )
    cmd_arguments = parser.parse_args()

    def kill_child_processes():
        process = psutil.Process(os.getpid())
        children = process.children(recursive=True)

        for child in children:
            try:
                child.terminate()
            except psutil.NoSuchProcess:
                pass
        _, still_alive = psutil.wait_procs(children, timeout=0.3)
        for child in still_alive:
            try:
                child.kill()
            except psutil.NoSuchProcess:
                pass

    funcname = cmd_arguments.funcname
    if cmd_arguments.arguments_json_fd is None:
        content = cmd_arguments.funcargs
        if content is None:
            parser.error('the following arguments are required: funcargs')
    else:
        with os.fdopen(cmd_arguments.arguments_json_fd) as arguments_json_file:
            content = arguments_json_file.read()
    try:
        arguments = format.decode(content)
    except Exception:
        raise ValueError('cannot load arguments from {}'.format(content))
    if cmd_arguments.logger_name:
        logger = logging.getLogger(cmd_arguments.logger_name)
    else:
        logger = simpleflow_logger
    callable_ = make_callable(funcname)
    if hasattr(callable_, '__wrapped__'):
        callable_ = callable_.__wrapped__
    args = arguments.get('args', ())
    kwargs = arguments.get('kwargs', {})
    context = (json.loads(cmd_arguments.context)
               if cmd_arguments.context is not None else None)
    try:
        if hasattr(callable_, 'execute'):
            inst = callable_(*args, **kwargs)
            if context is not None:
                inst.context = context
            result = inst.execute()
            if hasattr(inst, 'post_execute'):
                inst.post_execute()
        else:
            if context is not None:
                callable_.context = context
            result = callable_(*args, **kwargs)
    except Exception as err:
        logger.error('Exception: {}'.format(err))
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = traceback.format_tb(exc_traceback)
        details = json_dumps(
            {
                'error': exc_type.__name__,
                'message': str(exc_value),
                'traceback': tb,
            },
            default=repr,
        )
        if cmd_arguments.error_fd == 2:
            sys.stderr.flush()
        if not compat.PY2:
            details = details.encode('utf-8')
        os.write(cmd_arguments.error_fd, details)
        if cmd_arguments.kill_children:
            kill_child_processes()
        sys.exit(1)

    if cmd_arguments.result_fd == 1:  # stdout (legacy)
        sys.stdout.flush()  # may have print's in flight
        os.write(cmd_arguments.result_fd, b'\n')
    result = json_dumps(result)
    if not compat.PY2:
        result = result.encode('utf-8')
    os.write(cmd_arguments.result_fd, result)
    if cmd_arguments.kill_children:
        kill_child_processes()
Example #40
 def test_default(self):
     actual = json_dumps(datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC))
     expected = '"1970-01-01T00:00:00Z"'
     self.assertEqual(expected, actual)
Example #41
 def test_default(self):
     actual = json_dumps(datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC))
     expected = '"1970-01-01T00:00:00+00:00"'
     self.assertEqual(expected, actual)
Example #42
    def submit(self, func, *args, **kwargs):
        logger.info('executing task {}(args={}, kwargs={})'.format(
            func, args, kwargs))

        future = futures.Future()

        context = self.get_run_context()
        context["activity_id"] = str(self.nb_activities)
        self.nb_activities += 1

        # Ensure signals ordering
        if isinstance(func, SignalTask):
            self.signals_sent.add(func.name)
        elif isinstance(func, WaitForSignal):
            signal_name = func.signal_name
            if signal_name not in self.signals_sent:
                raise NotImplementedError(
                    'wait_signal({}) before signal was sent: unsupported by the local executor'.format(signal_name)
                )
        elif isinstance(func, MarkerTask):
            self._markers.setdefault(func.name, []).append(Marker(func.name, func.details))

        if isinstance(func, Submittable):
            task = func  # *args, **kwargs already resolved.
            task.context = context
            func = getattr(task, 'activity', None)
        elif isinstance(func, Activity):
            task = ActivityTask(func, context=context, *args, **kwargs)
        elif issubclass(func, Workflow):
            task = WorkflowTask(self, func, *args, **kwargs)
        else:
            raise TypeError('invalid type {} for {}'.format(
                type(func), func))

        if isinstance(task, WorkflowTask):
            self.on_new_workflow(task)

        try:
            future._result = task.execute()
            if hasattr(task, 'post_execute'):
                task.post_execute()
            state = 'completed'
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            future._exception = exc_value
            logger.exception('rescuing exception: {}'.format(exc_value))
            if (isinstance(func, Activity) or issubclass_(func, Workflow)) and getattr(func, 'raises_on_failure', None):
                tb = traceback.format_tb(exc_traceback)
                message = format_exc(exc_value)
                details = json_dumps(
                    {
                        'error': exc_type.__name__,
                        'message': str(exc_value),
                        'traceback': tb,
                    },
                    default=repr
                )
                raise exceptions.TaskFailed(
                    func.name,
                    message,
                    details,
                )
            state = 'failed'
        finally:
            if isinstance(task, WorkflowTask):
                self.on_completed_workflow()
            future._state = futures.FINISHED

        if func:
            self._history.add_activity_task(
                func,
                decision_id=None,
                last_state=state,
                activity_id=context["activity_id"],
                input={'args': args, 'kwargs': kwargs},
                result=future.result)
        return future
Example #43
def control(message):
    return encode(json_dumps(message), constants.MAX_CONTROL_LENGTH)
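
The point is to guard SWF's size limit on the `control` field. A self-contained sketch of the idea; the real `encode()` helper and the exact value of `constants.MAX_CONTROL_LENGTH` are simpleflow internals, so both are assumptions here:

MAX_CONTROL_LENGTH = 32768  # assumed; SWF caps most payload fields at 32768 chars

def control_sketch(message):
    dump = json_dumps(message)
    if len(dump) > MAX_CONTROL_LENGTH:
        # the real encode() may truncate or raise instead; this is illustrative
        raise ValueError('control payload too long: {} chars'.format(len(dump)))
    return dump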
Example #44
def main():
    """
    When executed as a script, this module expects the name of a callable as
    its first argument and the arguments of the callable encoded in a JSON
    string as its second argument. It then executes the callable with the
    arguments after decoding them into Python objects. It finally encodes the
    value returned by the callable into a JSON string and prints it on stdout.

    The arguments of the callable are stored in a dict with the following
    format: ::

        {'args': [...],
         'kwargs': {
             ...,
         }}

    Synopsis
    --------

    ::

        usage: execute.py [-h] funcname funcargs

        positional arguments:
          funcname    name of the callable to execute
          funcargs    callable arguments in JSON

        optional arguments:
          -h, --help  show this help message and exit

    Examples
    --------

    ::

        $ python -m simpleflow.execute "os.path.exists" '{"args": ["/tmp"]}'
        true

    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'funcname',
        help='name of the callable to execute',
    )
    parser.add_argument(
        'funcargs',
        help='callable arguments in JSON',
    )
    parser.add_argument(
        '--context',
        help='Activity Context',
    )
    parser.add_argument(
        '--logger-name',
        help='logger name',
    )
    parser.add_argument(
        '--result-fd',
        type=int,
        default=1,
        metavar='N',
        help='result file descriptor',
    )
    parser.add_argument(
        '--error-fd',
        type=int,
        default=2,
        metavar='N',
        help='error file descriptor',
    )
    parser.add_argument(
        '--arguments-json-fd',
        type=int,
        default=None,
        metavar='N',
        help='JSON input file descriptor',
    )
    parser.add_argument(
        '--kill-children',
        action='store_true',
        help='kill child processes on exit',
    )
    cmd_arguments = parser.parse_args()

    def kill_child_processes():
        process = psutil.Process(os.getpid())
        children = process.children(recursive=True)

        for child in children:
            try:
                child.terminate()
            except psutil.NoSuchProcess:
                pass
        _, still_alive = psutil.wait_procs(children, timeout=0.3)
        for child in still_alive:
            try:
                child.kill()
            except psutil.NoSuchProcess:
                pass

    funcname = cmd_arguments.funcname
    if cmd_arguments.arguments_json_fd is None:
        content = cmd_arguments.funcargs
        if content is None:
            parser.error('the following arguments are required: funcargs')
    else:
        with os.fdopen(cmd_arguments.arguments_json_fd) as arguments_json_file:
            content = arguments_json_file.read()
    try:
        arguments = format.decode(content)
    except Exception:
        raise ValueError('cannot load arguments from {}'.format(
            content))
    if cmd_arguments.logger_name:
        logger = logging.getLogger(cmd_arguments.logger_name)
    else:
        logger = simpleflow_logger
    callable_ = make_callable(funcname)
    if hasattr(callable_, '__wrapped__'):
        callable_ = callable_.__wrapped__
    args = arguments.get('args', ())
    kwargs = arguments.get('kwargs', {})
    context = json.loads(cmd_arguments.context) if cmd_arguments.context is not None else None
    try:
        if hasattr(callable_, 'execute'):
            inst = callable_(*args, **kwargs)
            if context is not None:
                inst.context = context
            result = inst.execute()
            if hasattr(inst, 'post_execute'):
                inst.post_execute()
        else:
            if context is not None:
                callable_.context = context
            result = callable_(*args, **kwargs)
    except Exception as err:
        logger.error('Exception: {}'.format(err))
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = traceback.format_tb(exc_traceback)
        details = json_dumps(
            {
                'error': exc_type.__name__,
                'message': str(exc_value),
                'traceback': tb,
            },
            default=repr,
        )
        if cmd_arguments.error_fd == 2:
            sys.stderr.flush()
        if not compat.PY2:
            details = details.encode('utf-8')
        os.write(cmd_arguments.error_fd, details)
        if cmd_arguments.kill_children:
            kill_child_processes()
        sys.exit(1)

    if cmd_arguments.result_fd == 1:  # stdout (legacy)
        sys.stdout.flush()  # may have print's in flight
        os.write(cmd_arguments.result_fd, b'\n')
    result = json_dumps(result)
    if not compat.PY2:
        result = result.encode('utf-8')
    os.write(cmd_arguments.result_fd, result)
    if cmd_arguments.kill_children:
        kill_child_processes()
Example #45
def standalone(context,
               workflow,
               domain,
               workflow_id,
               execution_timeout,
               tags,
               decision_tasks_timeout,
               input,
               input_file,
               nb_workers,
               nb_deciders,
               heartbeat,
               display_status,
               repair,
               force_activities,
               ):
    """
    This command spawn a decider and an activity worker to execute a workflow
    with a single main process.

    """
    disable_boto_connection_pooling()

    if force_activities and not repair:
        raise ValueError(
            "You should only use --force-activities with --repair."
        )

    if not workflow_id:
        workflow_id = get_workflow(workflow).name

    wf_input = None
    if input or input_file:
        wf_input = get_or_load_input(input_file, input)

    if repair:
        repair_run_id = None
        # --repair carries either "workflow_id" or "workflow_id run_id"
        if " " in repair:
            repair, repair_run_id = repair.split(" ", 1)
        # get the previous execution history, it will serve as "default history"
        # for activities that succeeded in the previous execution
        logger.info(
            'retrieving history of previous execution: domain={} '
            'workflow_id={} run_id={}'.format(domain, repair, repair_run_id)
        )
        previous_history = get_workflow_history(domain, repair, run_id=repair_run_id)
        previous_history.parse()
        # get the previous execution input if none passed
        if not input and not input_file:
            wf_input = previous_history.events[0].input
    else:
        previous_history = None

    task_list = create_unique_task_list(workflow_id)
    logger.info('using task list {}'.format(task_list))
    decider_proc = multiprocessing.Process(
        target=decider.command.start,
        args=(
            [workflow],
            domain,
            task_list,
        ),
        kwargs={
            'nb_processes': nb_deciders,
            'repair_with': previous_history,
            'force_activities': force_activities,
            'is_standalone': True,
        },
    )
    decider_proc.start()

    worker_proc = multiprocessing.Process(
        target=worker.command.start,
        args=(
            domain,
            task_list,
        ),
        kwargs={
            'nb_processes': nb_workers,
            'heartbeat': heartbeat,
        },
    )
    worker_proc.start()

    print('starting workflow {}'.format(workflow), file=sys.stderr)
    ex = start_workflow.callback(
        workflow,
        domain,
        workflow_id,
        task_list,
        execution_timeout,
        tags,
        decision_tasks_timeout,
        json_dumps(wf_input),
        None,
        local=False,
    )
    while True:
        time.sleep(2)
        ex = helpers.get_workflow_execution(
            domain,
            ex.workflow_id,
            ex.run_id,
        )
        if display_status:
            print('status: {}'.format(ex.status), file=sys.stderr)
        if ex.status == ex.STATUS_CLOSED:
            print('execution {} finished'.format(ex.workflow_id), file=sys.stderr)
            break

    os.kill(worker_proc.pid, signal.SIGTERM)
    worker_proc.join()
    os.kill(decider_proc.pid, signal.SIGTERM)
    decider_proc.join()
示例#46
0
def format_arguments_json(*args, **kwargs):
    dump = json_dumps({
        'args': args,
        'kwargs': kwargs,
    })
    return dump
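
A round-trip sketch of the payload consumed by the worker entry point above (key order in the comment assumes json_dumps sorts keys, as the pretty-printer test below suggests; format.decode is the inverse used at the top of this page):

dump = format_arguments_json(1, 2, retries=3)
# '{"args": [1, 2], "kwargs": {"retries": 3}}'
arguments = format.decode(dump)
args = arguments.get('args', ())      # [1, 2] -- tuples come back as lists
kwargs = arguments.get('kwargs', {})  # {'retries': 3}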
示例#47
0
    def test_json_dumps_pretty(self):
        self.assertEqual(
            json_dumps({"z": 1, "abc": "def"}, pretty=True),
            '{\n    "abc": "def",\n    "z": 1\n}',
        )
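
The expected string is exactly what the standard library produces with indent=4 and sort_keys=True, so pretty=True plausibly maps to those options (a stdlib equivalence sketch, not a claim about simpleflow's internals):

import json

json.dumps({"z": 1, "abc": "def"}, indent=4, sort_keys=True)
# '{\n    "abc": "def",\n    "z": 1\n}'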
示例#48
0
    def test_bugfix_154_default(self):
        actual = json_dumps(datetime.datetime(1970, 1, 1), default=lambda _: 'foo')
        expected = '"foo"'
        self.assertEqual(expected, actual)
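
The default= passthrough mirrors the standard library: when the encoder hits a non-serializable value (here a datetime), it calls default and serializes its return value instead:

import datetime
import json

json.dumps(datetime.datetime(1970, 1, 1), default=lambda _: 'foo')
# '"foo"'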
示例#49
0
    def replay(self, decision_response, decref_workflow=True):
        """Replay the workflow from the start until it blocks.
        Called by the DeciderWorker.

        :param decision_response: an object wrapping the PollForDecisionTask response
        :type  decision_response: swf.responses.Response
        :param decref_workflow: decref the workflow once replay is done (to save memory)
        :type decref_workflow: bool

        :returns: a list of decisions and a context dict (obsolete, always empty)
        :rtype: ([swf.models.decision.base.Decision], dict)
        """
        self.reset()

        history = decision_response.history
        self._history = History(history)
        self._history.parse()
        self.build_execution_context(decision_response)
        self._execution = decision_response.execution

        workflow_started_event = history[0]
        input = workflow_started_event.input
        if input is None:
            input = {}
        args = input.get('args', ())
        kwargs = input.get('kwargs', {})

        self.before_replay()
        try:
            self.propagate_signals()
            result = self.run_workflow(*args, **kwargs)
        except exceptions.ExecutionBlocked:
            logger.info('{} open activities ({} decisions)'.format(
                self._open_activity_count,
                len(self._decisions),
            ))
            self.after_replay()
            if decref_workflow:
                self.decref_workflow()
            if self._append_timer:
                self._add_start_timer_decision('_simpleflow_wake_up_timer')
            return self._decisions, {}
        except exceptions.TaskException as err:
            reason = 'Workflow execution error in task {}: "{}"'.format(
                err.task.name,
                getattr(err.exception, 'reason', repr(err.exception)))
            logger.exception(reason)

            details = getattr(err.exception, 'details', None)
            self.on_failure(reason, details)

            decision = swf.models.decision.WorkflowExecutionDecision()
            decision.fail(
                reason=swf.format.reason(reason),
                details=swf.format.details(details),
            )
            self.after_closed()
            if decref_workflow:
                self.decref_workflow()
            return [decision], {}

        except Exception as err:
            reason = 'Cannot replay the workflow: {}({})'.format(
                err.__class__.__name__,
                err,
            )

            tb = traceback.format_exc()
            details = 'Traceback:\n{}'.format(tb)
            logger.exception(reason + '\n' + details)

            self.on_failure(reason)

            decision = swf.models.decision.WorkflowExecutionDecision()
            decision.fail(
                reason=swf.format.reason(reason),
                details=swf.format.details(details),
            )
            self.after_closed()
            if decref_workflow:
                self.decref_workflow()
            return [decision], {}

        self.after_replay()
        decision = swf.models.decision.WorkflowExecutionDecision()
        decision.complete(result=swf.format.result(json_dumps(result)))
        self.on_completed()
        self.after_closed()
        if decref_workflow:
            self.decref_workflow()
        return [decision], {}
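
A rough sketch of the call site: the DeciderWorker hands the poll response to replay() and sends the returned decisions back to SWF. The poller methods below are hypothetical stand-ins; only replay() comes from the code above.

def decide_once(executor, poller):
    decision_response = poller.poll()                  # swf.responses.Response (hypothetical poller)
    decisions, _ = executor.replay(decision_response)  # second value is the obsolete context dict
    poller.complete(decisions)                         # respond with the decision list (hypothetical)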
示例#50
0
        def execute(*args, **kwargs):
            logger = logging.getLogger(logger_name)
            command = 'simpleflow.execute'  # name of a module.
            sys.stdout.flush()
            sys.stderr.flush()
            result_str = None  # useless
            context = kwargs.pop('context', {})
            with tempfile.TemporaryFile() as result_fd, tempfile.TemporaryFile() as error_fd:
                dup_result_fd = os.dup(result_fd.fileno())  # remove FD_CLOEXEC
                dup_error_fd = os.dup(error_fd.fileno())  # remove FD_CLOEXEC
                # print('error_fd: {}'.format(dup_error_fd))
                full_command = [
                    interpreter,
                    '-m',
                    command,  # execute the module as a script (python -m)
                    get_name(func),
                    format_arguments_json(*args, **kwargs),
                    '--logger-name={}'.format(logger_name),
                    '--result-fd={}'.format(dup_result_fd),
                    '--error-fd={}'.format(dup_error_fd),
                    '--context={}'.format(json_dumps(context)),
                ]
                if kill_children:
                    full_command.append('--kill-children')
                if compat.PY2:  # close_fds doesn't work with python2 (using its C _posixsubprocess helper)
                    close_fds = False
                    pass_fds = ()
                else:
                    close_fds = True
                    pass_fds = (dup_result_fd, dup_error_fd)
                process = subprocess.Popen(
                    full_command,
                    bufsize=-1,
                    close_fds=close_fds,
                    pass_fds=pass_fds,
                )
                rc = wait_subprocess(process,
                                     timeout=timeout,
                                     command_info=full_command)
                os.close(dup_result_fd)
                os.close(dup_error_fd)
                if rc:
                    error_fd.seek(0)
                    err_output = error_fd.read()
                    if err_output:
                        if not compat.PY2:
                            err_output = err_output.decode('utf-8',
                                                           errors='replace')
                    raise ExecutionError(err_output)

                result_fd.seek(0)
                result_str = result_fd.read()

            if not result_str:
                return None
            try:
                if not compat.PY2:
                    result_str = result_str.decode('utf-8', errors='replace')
                result = format.decode(result_str)
                return result
            except BaseException as ex:
                logger.exception('Exception in python.execute: {} {}'.format(
                    ex.__class__.__name__, ex))
                logger.warning('%r', result_str)
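
Closures of this shape are produced by a decorator that captures interpreter, func, timeout and kill_children; in simpleflow that is execute.python. A usage sketch (details may vary by version):

from simpleflow import execute

@execute.python()
def double(x):
    return x * 2

double(21)  # runs in a subprocess; the result round-trips through JSON -> 42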
示例#55
0
def input(message):
    return encode(json_dumps(message), constants.MAX_INPUT_LENGTH)
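
encode presumably truncates the serialized message so it fits SWF's input size limit; a purely illustrative sketch of such a helper (the real implementation may log or mark the truncation differently):

def encode(message, max_length):
    # illustrative only: keep the payload under the SWF field limit
    if message is not None and len(message) > max_length:
        return message[:max_length]
    return message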
示例#56
0
    funcname = cmd_arguments.funcname
    try:
        arguments = json.loads(cmd_arguments.funcargs)
    except Exception:
        raise ValueError('cannot load arguments from {}'.format(cmd_arguments.funcargs))

    callable_ = make_callable(funcname)
    if hasattr(callable_, '__wrapped__'):
        callable_ = callable_.__wrapped__

    args = arguments.get('args', ())
    kwargs = arguments.get('kwargs', {})
    try:
        if hasattr(callable_, 'execute'):
            result = callable_(*args, **kwargs).execute()
        else:
            result = callable_(*args, **kwargs)
    except Exception as err:
        logger.error('Exception: {}'.format(err))
        # Use base64 encoding to avoid carriage returns and special characters.
        # FIXME change this: brittle, missing traceback
        encoded_err = base64.b64encode(pickle.dumps(err))
        if not compat.PY2:
            # Convert bytes to string
            encoded_err = encoded_err.decode('utf-8', errors='replace')
        print(encoded_err)
        sys.exit(1)
    else:
        print(json_dumps(result))
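
The parent-side counterpart of this legacy error path rebuilds the exception from the child's stdout (sketch; encoded_err stands for the base64 line printed above):

import base64
import pickle

err = pickle.loads(base64.b64decode(encoded_err))  # the original exception object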
示例#57
0
def result(message):
    return encode(json_dumps(message), constants.MAX_RESULT_LENGTH)
示例#58
0
        def execute(*args, **kwargs):
            logger = logging.getLogger(logger_name)
            command = 'simpleflow.execute'  # name of a module.
            sys.stdout.flush()
            sys.stderr.flush()
            result_str = None  # useless
            context = kwargs.pop('context', {})
            with tempfile.TemporaryFile() as result_fd, tempfile.TemporaryFile() as error_fd:
                dup_result_fd = os.dup(result_fd.fileno())  # remove FD_CLOEXEC
                dup_error_fd = os.dup(error_fd.fileno())  # remove FD_CLOEXEC
                arguments_json = format_arguments_json(*args, **kwargs)
                full_command = [
                    interpreter, '-m', command,  # execute the module as a script (python -m)
                    get_name(func),
                    '--logger-name={}'.format(logger_name),
                    '--result-fd={}'.format(dup_result_fd),
                    '--error-fd={}'.format(dup_error_fd),
                    '--context={}'.format(json_dumps(context)),
                ]
                if len(arguments_json) < MAX_ARGUMENTS_JSON_LENGTH:  # command-line limit on Linux: 128K
                    full_command.append(arguments_json)
                    arg_file = None
                    arg_fd = None
                else:
                    arg_file = tempfile.TemporaryFile()
                    arg_file.write(arguments_json.encode('utf-8'))
                    arg_file.flush()
                    arg_file.seek(0)
                    arg_fd = os.dup(arg_file.fileno())
                    full_command.append('--arguments-json-fd={}'.format(arg_fd))
                    full_command.append('foo')  # dummy funcarg
                if kill_children:
                    full_command.append('--kill-children')
                if is_buggy_subprocess32():  # close_fds doesn't work with subprocess32 < 3.5.0
                    close_fds = False
                    pass_fds = []
                else:
                    close_fds = True
                    pass_fds = [dup_result_fd, dup_error_fd]
                    if arg_file:
                        pass_fds.append(arg_fd)
                process = subprocess.Popen(
                    full_command,
                    bufsize=-1,
                    close_fds=close_fds,
                    pass_fds=pass_fds,
                )
                rc = wait_subprocess(process, timeout=timeout, command_info=full_command)
                os.close(dup_result_fd)
                os.close(dup_error_fd)
                if arg_file:
                    arg_file.close()
                if rc:
                    error_fd.seek(0)
                    err_output = error_fd.read()
                    if err_output:
                        if not compat.PY2:
                            err_output = err_output.decode('utf-8', errors='replace')
                    raise ExecutionError(err_output)

                result_fd.seek(0)
                result_str = result_fd.read()

            if not result_str:
                return None
            try:
                if not compat.PY2:
                    result_str = result_str.decode('utf-8', errors='replace')
                result = format.decode(result_str)
                return result
            except BaseException as ex:
                logger.exception('Exception in python.execute: {} {}'.format(ex.__class__.__name__, ex))
                logger.warning('%r', result_str)
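
Design note: the threshold branch exists because Linux caps a single argv element (the 128K mentioned in the inline comment), so an oversized arguments_json is handed over through an inherited file descriptor instead; the child side of that handshake is the --arguments-json-fd branch in the entry point at the top of this page. A sizing sketch (MAX_ARGUMENTS_JSON_LENGTH assumed defined next to this code):

arguments_json = format_arguments_json(*(['x'] * 100000))
len(arguments_json) < MAX_ARGUMENTS_JSON_LENGTH
# False for a payload this large (~500 KB), so it goes through --arguments-json-fd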