Code example #1
def _process_payload_step(payload_str, obj):
    """
    Internal function to turn a json fsm payload (from an AWS Lambda event)
    into an fsm Context, and then dispatch the event and execute user code.

    This function is ONLY used in the AWS Step Function execution path.

    :param payload_str: a json string like '{"serialized": "data"}'
    :param obj: a dict to pass to fsm Context.dispatch(...)
    """
    payload = json.loads(payload_str, **json_loads_additional_kwargs())
    obj[OBJ.PAYLOAD] = payload_str
    fsm = Context.from_payload_dict(payload)
    logger.debug('system_context=%s', fsm.system_context())
    logger.debug('user_context.keys()=%s', fsm.user_context().keys())
    current_event = fsm.system_context().get(SYSTEM_CONTEXT.CURRENT_EVENT,
                                             STATE.PSEUDO_INIT)

    # all retries etc. are handled by the AWS Step Function infrastructure,
    # so this is an entirely stripped-down dispatch running ONLY the user
    # Actions, and NONE of the framework's retry etc. code.
    next_event = fsm.current_state.dispatch(fsm, current_event, obj)
    if next_event:
        fsm.current_event = next_event
        data = fsm.to_payload_dict()
        data[AWS.STEP_FUNCTION] = True
        return data
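
A minimal sketch of a Lambda entry point that could feed this function, shown for orientation only; the handler name and the contents of `obj` are assumptions, not the library's actual API:

import json

def lambda_step_handler(event, context):  # hypothetical entry-point name
    # the Step Function state receives the serialized fsm payload as its input,
    # and the returned dict becomes the input of the next state (None once the
    # machine reaches a terminal state)
    obj = {'lambda_context': context}  # illustrative; real keys are library-defined
    return _process_payload_step(json.dumps(event), obj)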
Code example #2
    def do_POST(self):

        length = int(self.headers['content-length'])
        data = json.loads(self.rfile.read(length), **json_loads_additional_kwargs())

        subprocess_args = ['docker', 'run', '-v', '/var/run/docker.sock:/var/run/docker.sock']
        if 'VOLUME' in os.environ:
            subprocess_args.extend(['-v', os.environ['VOLUME']])
        if 'LINK' in os.environ:
            subprocess_args.extend(['--link=' + os.environ['LINK']])
        if 'NETWORK' in os.environ:
            subprocess_args.extend(['--network=' + os.environ['NETWORK']])
        co = data.get('overrides', {}).get('containerOverrides', [])
        environ = {}
        if co:
            for env in co[0].get('environment', []):
                environ[env['name']] = env['value']
                subprocess_args.extend(['-e', '%(name)s=%(value)s' % env])
        subprocess_args.extend(['-e', 'PYTHON_BIN=%s' % os.environ['PYTHON_BIN']])
        subprocess_args.append(args.image)
        subprocess.call(subprocess_args)

        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
        self.send_header("Pragma", "no-cache")
        self.send_header("Expires", "0")
        self.send_header("Content-Length", "2")
        self.end_headers()
        self.wfile.write(b'{}')  # bytes, so the handler also works on Python 3
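
For reference, the handler parses a body shaped like an ECS RunTask request. A hypothetical example, with key names taken from the `overrides` lookup above; `FSM_CONTEXT` is the environment variable used by the docker-runner example later in this listing:

# each environment entry below becomes a `-e name=value` argument to `docker run`
body = {
    'overrides': {
        'containerOverrides': [
            {'environment': [{'name': 'FSM_CONTEXT', 'value': '<base64 payload>'}]},
        ],
    },
}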
Code example #3
    def trace(self, uvars=(), raw=False):
        # deserialize every message captured so far and summarize the machine's
        # progression; `uvars` names the user_context variables to report
        # alongside the standard system_context fields
        s = 'system_context'
        u = 'user_context'
        svars = ('current_state', 'current_event', 'steps', 'retries')
        serialized = [
            json.loads(x, **json_loads_additional_kwargs())
            for x in self.all_messages
        ]
        if raw:
            return serialized
        data = enumerate(serialized)
        return [(x[0], tuple(x[1][s][v] for v in svars),
                 tuple(x[1][u].get(v) for v in uvars)) for x in data]
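
A hedged usage sketch: assuming `self.all_messages` holds serialized fsm payloads, a call selecting one user variable yields one summary tuple per message. The fixture, state, and event names below are illustrative:

rows = harness.trace(uvars=('counter',))  # `harness` is an assumed test fixture
# e.g. [(0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
#       (1, ('start', 'ok', 1, 0), (1,))]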
Code example #4
def _process_payload(payload_str, obj):
    """
    Internal function to turn a json fsm payload (from an AWS Lambda event)
    into an fsm Context, and then dispatch the event and execute user code.

    :param payload_str: a json string like '{"serialized": "data"}'
    :param obj: a dict to pass to fsm Context.dispatch(...)
    """
    payload = json.loads(payload_str, **json_loads_additional_kwargs())
    obj[OBJ.PAYLOAD] = payload_str
    fsm = Context.from_payload_dict(payload)
    logger.debug('system_context=%s', fsm.system_context())
    logger.debug('user_context.keys()=%s', fsm.user_context().keys())
    current_event = fsm.system_context().get(SYSTEM_CONTEXT.CURRENT_EVENT,
                                             STATE.PSEUDO_INIT)
    fsm.dispatch(current_event, obj)
Code example #5
    def _retry(self, obj):
        """
        Handle unhappy-path errors by saving the last payload with the "retries"
        parameter increased by 1. If too many retries have been attempted, terminate
        the machine, and record a FATAL error.

        :param obj: a dict.
        """
        # fetch the original payload from the obj in-memory data. we grab the original
        # payload rather than the current context to avoid passing any vars that were
        # potentially mutated up to this point.
        payload = obj[OBJ.PAYLOAD]
        retry_data = json.loads(payload, **json_loads_additional_kwargs())
        retry_system_context = retry_data[PAYLOAD.SYSTEM_CONTEXT]
        retry_system_context[SYSTEM_CONTEXT.RETRIES] += 1

        # determine how many times this has been retried, and if it has been retried
        # too many times, then stop it permanently
        if retry_system_context[SYSTEM_CONTEXT.RETRIES] <= self.max_retries:
            self._queue_error(
                ERRORS.RETRY,
                'More retries allowed (retry=%d, max=%d). Retrying.' %
                (retry_system_context[SYSTEM_CONTEXT.RETRIES],
                 self.max_retries))
            retried = self._start_retries(retry_data, obj)

            # things are falling off the rails
            if not retried:
                self._queue_error(
                    ERRORS.RETRY,
                    'System error during retry. Failover to event stream.')
                self._start_retries(retry_data, obj, recovering=True)

        # if there are no more retries available, simply log an error, then delete
        # the retry entity from dynamodb. it will take human intervention to recover things
        # at this point.
        else:
            self._queue_error(
                ERRORS.FATAL,
                'No more retries allowed (retry=%d, max=%d). Terminating.' %
                (retry_system_context[SYSTEM_CONTEXT.RETRIES],
                 self.max_retries))
            self._stop_retries(obj)
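
The bookkeeping above can be illustrated standalone. The literal key names here are assumptions standing in for the PAYLOAD and SYSTEM_CONTEXT constants:

import json

payload = '{"system_context": {"retries": 2}, "user_context": {}}'
retry_data = json.loads(payload)
retry_data['system_context']['retries'] += 1  # bump the counter on the ORIGINAL payload
max_retries = 3
if retry_data['system_context']['retries'] <= max_retries:
    print('retrying (%d of %d)' % (retry_data['system_context']['retries'], max_retries))
else:
    print('fatal: out of retries, terminating')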
Code example #6
# assumed setup: `client` (a docker-py client), `container`, and `environment`
# are created earlier in the script
return_code = 1  # pessimistic default so the `finally` block can always run
try:
    client.start(container=container)
    stdout = client.logs(container, stdout=True, stream=True)
    for line in stdout:
        sys.stdout.write(line)
    stderr = client.logs(container, stderr=True, stream=True)
    for line in stderr:
        sys.stderr.write(line)
    return_code = client.wait(container)

except Exception:
    logging.exception('')
    raise

finally:

    if not environment:
        sys.stderr.write(FATAL_ENVIRONMENT_ERROR)
        sys.exit(1)

    # FSM_CONTEXT is the environment variable used by aws_lambda_fsm.utils.ECSTaskEntryAction
    event = DONE_EVENT if return_code == 0 else FAIL_EVENT
    payload_encoded = environment[ENVIRONMENT.FSM_CONTEXT]
    payload = json.loads(base64.b64decode(payload_encoded), **json_loads_additional_kwargs())
    payload[PAYLOAD.SYSTEM_CONTEXT][SYSTEM_CONTEXT.CURRENT_EVENT] = event
    serialized = json.dumps(payload, **json_dumps_additional_kwargs())
    send_next_event_for_dispatch(
        None,
        serialized,
        payload[PAYLOAD.SYSTEM_CONTEXT][SYSTEM_CONTEXT.CORRELATION_ID]
    )
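
The decode above implies a matching encode on the producer side. A hedged sketch of how a launcher could pack the fsm payload into the container environment; the literal key names stand in for the ENVIRONMENT, PAYLOAD, and SYSTEM_CONTEXT constants:

import base64
import json

payload = {
    'system_context': {'current_event': 'pseudo_init', 'correlation_id': 'abc-123'},
    'user_context': {},
}
encoded = base64.b64encode(json.dumps(payload).encode('utf-8'))
container_environment = {'FSM_CONTEXT': encoded}  # read back by the runner above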
Code example #7
args = parser.parse_args()

logging.basicConfig(
    format='[%(levelname)s] %(asctime)-15s %(message)s',
    level=int(args.log_level) if args.log_level.isdigit() else args.log_level,
    datefmt='%Y-%m-%d %H:%M:%S')

logging.getLogger('boto3').setLevel(args.boto_log_level)
logging.getLogger('botocore').setLevel(args.boto_log_level)

validate_config()

if args.num_machines > 1:
    # start things off
    context = json.loads(args.initial_context or "{}",
                         **json_loads_additional_kwargs())
    current_state = current_event = STATE.PSEUDO_INIT
    start_state_machines(args.machine_name, [context] * args.num_machines,
                         current_state=current_state,
                         current_event=current_event)
    exit(0)

# a checkpoint was specified, so resume from a context saved to the kinesis stream
if args.checkpoint_shard_id and args.checkpoint_sequence_number:

    # setup connections to AWS
    kinesis_stream_arn = getattr(settings, args.kinesis_stream_arn)
    logging.info('Kinesis stream ARN: %s', kinesis_stream_arn)
    logging.info('Kinesis endpoint: %s', settings.ENDPOINTS.get(AWS.KINESIS))
    if get_arn_from_arn_string(kinesis_stream_arn).service != AWS.KINESIS:
        logging.fatal("%s is not a Kinesis ARN", kinesis_stream_arn)
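
For orientation, a direct call to start_state_machines with the same argument shape the bootstrap builds above; the machine name and context are illustrative:

# launch three machines that share one initial context
start_state_machines('tracer',
                     [{'customer_id': 42}] * 3,
                     current_state=STATE.PSEUDO_INIT,
                     current_event=STATE.PSEUDO_INIT)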
Code example #8
    def do_POST(self):
        data_str = self.rfile.read(int(self.headers['Content-Length']))
        data = json.loads(data_str, **json_loads_additional_kwargs())
        self.server.message = data[AWS_SNS.Message]
        self.send_response(200)
        self.wfile.write(b"")  # bytes, so the handler also works on Python 3
Code example #9
    def test_json_loads_additional_kwargs_using_settings(self, mock_settings):
        mock_settings.JSON_LOADS_ADDITIONAL_KWARGS = {'default': lambda x: "foobar"}
        self.assertEqual({'default'}, set(json_loads_additional_kwargs().keys()))
        self.assertEqual("foobar", json_loads_additional_kwargs()['default']('~~~'))
Code example #10
    def test_json_loads_additional_kwargs_defaults(self):
        self.assertEqual({}, json_loads_additional_kwargs())
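
Examples #9 and #10 pin down the helper's contract: pass through settings.JSON_LOADS_ADDITIONAL_KWARGS when configured, else return an empty dict. A minimal sketch consistent with those tests; the settings import path is an assumption:

from aws_lambda_fsm import settings  # assumed module path

def json_loads_additional_kwargs():
    # configured kwargs pass straight through (example #9); default is {} (example #10)
    return getattr(settings, 'JSON_LOADS_ADDITIONAL_KWARGS', None) or {}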
Code example #11
    def test_custom_decoder(self, mock_settings):
        mock_settings.JSON_LOADS_ADDITIONAL_KWARGS = {'cls': Decoder}
        self.assertEqual('A', json.loads("B", **json_loads_additional_kwargs()))
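
The Decoder class here is defined elsewhere in the test module; a minimal stand-in consistent with the assertion (any input decodes to 'A') would be:

import json

class Decoder(json.JSONDecoder):
    # hypothetical stand-in: ignores the input entirely and always returns 'A'
    def decode(self, s, *args, **kwargs):
        return 'A'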