Example #1
def generic_handler(event, context_dict):
    """
    context_dict is generic information about the context
    that we are running in, provided by the scheduler
    """

    try:
        response_status = {'exception': None}
        s3 = boto3.resource('s3')

        logger.info("invocation started")

        # download the input
        status_key = event['status_key']
        func_key = event['func_key']
        data_key = event['data_key']
        data_byte_range = event['data_byte_range']
        output_key = event['output_key']

        if version.__version__ != event['pywren_version']:
            raise Exception("WRONGVERSION", "Pywren version mismatch",
                            version.__version__, event['pywren_version'])

        start_time = time.time()
        response_status['start_time'] = start_time

        func_filename = "/tmp/func.pickle"
        data_filename = "/tmp/data.pickle"
        output_filename = "/tmp/output.pickle"

        runtime_s3_bucket = event['runtime_s3_bucket']
        runtime_s3_key = event['runtime_s3_key']
        if event.get('runtime_url'):
            # NOTE(shivaram): Right now we only support S3 urls.
            runtime_s3_bucket_used, runtime_s3_key_used = wrenutil.split_s3_url(
                event['runtime_url'])
        else:
            runtime_s3_bucket_used = runtime_s3_bucket
            runtime_s3_key_used = runtime_s3_key

        job_max_runtime = event.get("job_max_runtime",
                                    290)  # default for lambda

        response_status['func_key'] = func_key
        response_status['data_key'] = data_key
        response_status['output_key'] = output_key
        response_status['status_key'] = status_key

        b, k = data_key
        KS = s3util.key_size(b, k)
        #logger.info("bucket=", b, "key=", k,  "status: ", KS, "bytes" )
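        # Block until s3util.key_size reports a size for the data key, i.e. until the object is visible in S3.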
        while KS is None:
            logger.warn("WARNING COULD NOT GET FIRST KEY")

            KS = s3util.key_size(b, k)
        if not event['use_cached_runtime']:
            subprocess.check_output("rm -Rf {}/*".format(RUNTIME_LOC),
                                    shell=True)

        # get the input and save to disk
        # FIXME: this is where we would attach the "canceled" metadata
        s3.meta.client.download_file(func_key[0], func_key[1], func_filename)
        func_download_time = time.time() - start_time
        response_status['func_download_time'] = func_download_time

        logger.info("func download complete, took {:3.2f} sec".format(
            func_download_time))

        if data_byte_range is None:
            s3.meta.client.download_file(data_key[0], data_key[1],
                                         data_filename)
        else:
            range_str = 'bytes={}-{}'.format(*data_byte_range)
            dres = s3.meta.client.get_object(Bucket=data_key[0],
                                             Key=data_key[1],
                                             Range=range_str)
            data_fid = open(data_filename, 'wb')
            data_fid.write(dres['Body'].read())
            data_fid.close()

        data_download_time = time.time() - start_time
        logger.info("data download complete, took {:3.2f} sec".format(
            data_download_time))
        response_status['data_download_time'] = data_download_time

        # now split
        d = json.load(open(func_filename, 'r'))
        shutil.rmtree(PYTHON_MODULE_PATH, True)  # delete old modules
        os.mkdir(PYTHON_MODULE_PATH)
        # get modules and save
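        # d['module_data'] maps module file paths to base64-encoded file contents;
        # each file is written under PYTHON_MODULE_PATH so it is importable via the PYTHONPATH set below.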
        for m_filename, m_data in d['module_data'].items():
            m_path = os.path.dirname(m_filename)

            if len(m_path) > 0 and m_path[0] == "/":
                m_path = m_path[1:]
            to_make = os.path.join(PYTHON_MODULE_PATH, m_path)
            #print "to_make=", to_make, "m_path=", m_path
            try:
                os.makedirs(to_make)
            except OSError as e:
                if e.errno == 17:
                    pass
                else:
                    raise e
            full_filename = os.path.join(to_make, os.path.basename(m_filename))
            #print "creating", full_filename
            fid = open(full_filename, 'wb')
            fid.write(b64str_to_bytes(m_data))
            fid.close()
        logger.info("Finished writing {} module files".format(
            len(d['module_data'])))
        logger.debug(
            subprocess.check_output("find {}".format(PYTHON_MODULE_PATH),
                                    shell=True))
        logger.debug(
            subprocess.check_output("find {}".format(os.getcwd()), shell=True))

        response_status['runtime_s3_key_used'] = runtime_s3_key_used
        response_status['runtime_s3_bucket_used'] = runtime_s3_bucket_used

        runtime_cached = download_runtime_if_necessary(s3,
                                                       runtime_s3_bucket_used,
                                                       runtime_s3_key_used)
        logger.info("Runtime ready, cached={}".format(runtime_cached))
        response_status['runtime_cached'] = runtime_cached

        cwd = os.getcwd()
        jobrunner_path = os.path.join(cwd, "jobrunner.py")

        extra_env = event.get('extra_env', {})
        extra_env['PYTHONPATH'] = "{}:{}".format(os.getcwd(),
                                                 PYTHON_MODULE_PATH)

        call_id = event['call_id']
        callset_id = event['callset_id']
        response_status['call_id'] = call_id
        response_status['callset_id'] = callset_id

        CONDA_PYTHON_PATH = "/tmp/condaruntime/bin"
        CONDA_PYTHON_RUNTIME = os.path.join(CONDA_PYTHON_PATH, "python")

        cmdstr = "{} {} {} {} {}".format(CONDA_PYTHON_RUNTIME, jobrunner_path,
                                         func_filename, data_filename,
                                         output_filename)

        setup_time = time.time()
        response_status['setup_time'] = setup_time - start_time

        local_env = os.environ.copy()

        local_env["OMP_NUM_THREADS"] = "1"
        local_env.update(extra_env)

        local_env['PATH'] = "{}:{}".format(CONDA_PYTHON_PATH,
                                           local_env.get("PATH", ""))

        logger.debug("command str=%s", cmdstr)
        # This is copied from http://stackoverflow.com/a/17698359/4577954
        # reasons for setting process group: http://stackoverflow.com/a/4791612
        process = subprocess.Popen(cmdstr,
                                   shell=True,
                                   env=local_env,
                                   bufsize=1,
                                   stdout=subprocess.PIPE,
                                   preexec_fn=os.setsid)

        logger.info("launched process")

        def consume_stdout(stdout, queue):
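            # Forward each line of the child's stdout into the queue; the loop ends when the pipe closes.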
            with stdout:
                for line in iter(stdout.readline, b''):
                    queue.put(line)

        q = Queue()

        t = Thread(target=consume_stdout, args=(process.stdout, q))
        t.daemon = True
        t.start()

        stdout = b""
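        # Relay the child's output and enforce job_max_runtime: on timeout, kill the whole
        # process group created via os.setsid and raise.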
        while t.is_alive():
            try:
                line = q.get_nowait()
                stdout += line
                logger.info(line)
            except Empty:
                time.sleep(PROCESS_STDOUT_SLEEP_SECS)
            total_runtime = time.time() - start_time
            if total_runtime > job_max_runtime:
                logger.warn(
                    "Process exceeded maximum runtime of {} sec".format(
                        job_max_runtime))
                # Send the signal to all the process groups
                os.killpg(os.getpgid(process.pid), signal.SIGTERM)
                raise Exception(
                    "OUTATIME", "Process executed for too long and was killed")

        logger.info("command execution finished")

        s3.meta.client.upload_file(output_filename, output_key[0],
                                   output_key[1])
        logger.debug("output uploaded to %s %s", output_key[0], output_key[1])

        end_time = time.time()

        response_status['stdout'] = stdout.decode("ascii")

        response_status['exec_time'] = time.time() - setup_time
        response_status['end_time'] = end_time

        response_status['host_submit_time'] = event['host_submit_time']
        response_status['server_info'] = get_server_info()

        response_status.update(context_dict)
    except Exception as e:
        # internal runtime exceptions
        response_status['exception'] = str(e)
        response_status['exception_args'] = e.args
        response_status['exception_traceback'] = traceback.format_exc()
    finally:

        s3.meta.client.put_object(Bucket=status_key[0],
                                  Key=status_key[1],
                                  Body=json.dumps(response_status))
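
For reference, here is a sketch of an event payload that would satisfy the event[...] reads in generic_handler above. The key names come from the handler itself; all bucket names, object keys, IDs, and version strings below are hypothetical placeholders.

import time

example_event = {
    'func_key': ('example-bucket', 'callset-0/func.pickle'),      # (bucket, key) pairs
    'data_key': ('example-bucket', 'callset-0/data.pickle'),
    'data_byte_range': None,              # or (start, end) to read only a slice of data_key
    'output_key': ('example-bucket', 'callset-0/0/output.pickle'),
    'status_key': ('example-bucket', 'callset-0/0/status.json'),
    'pywren_version': '0.4',              # must equal version.__version__ on the worker
    'runtime_s3_bucket': 'example-runtime-bucket',
    'runtime_s3_key': 'runtimes/default.tar.gz',
    # 'runtime_url': 's3://example-runtime-bucket/runtimes/default.tar.gz',  # optional override
    'use_cached_runtime': True,
    'job_max_runtime': 290,               # optional, seconds
    'extra_env': {},                      # optional extra environment variables
    'call_id': '0',
    'callset_id': 'example-callset',
    'host_submit_time': time.time(),
}
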
Example #2
def handler(event, context):
    s3 = boto3.resource('s3')

    start_time = time.time()

    func_filename = "/tmp/func.pickle"
    data_filename = "/tmp/data.pickle"
    output_filename = "/tmp/output.pickle"
    # cleanup previous invocations
    subprocess.check_output("rm -Rf /tmp/*", shell=True)

    server_info = {
        '/proc/cpuinfo': open("/proc/cpuinfo", 'r').read(),
        '/proc/meminfo': open("/proc/meminfo", 'r').read(),
        '/proc/self/cgroup': open("/proc/self/cgroup", 'r').read(),
        '/proc/cgroups': open("/proc/cgroups", 'r').read()
    }

    print "invocation started"

    # download the input
    func_key = event['func_key']
    data_key = event['data_key']
    data_byte_range = event['data_byte_range']
    output_key = event['output_key']
    status_key = event['status_key']
    runtime_s3_bucket = event['runtime_s3_bucket']
    runtime_s3_key = event['runtime_s3_key']

    b, k = data_key
    KS = s3util.key_size(b, k)
    print "bucket=", b, "key=", k, "status: ", KS, "bytes"
    while KS is None:
        print "WARNING COULD NOT GET FIRST KEY"

        KS = s3util.key_size(b, k)

    # get the input and save to disk
    # FIXME: this is where we would attach the "canceled" metadata
    s3.meta.client.download_file(func_key[0], func_key[1], func_filename)
    func_download_time = time.time()
    print "func download complete"

    if data_byte_range is None:
        s3.meta.client.download_file(data_key[0], data_key[1], data_filename)
    else:
        range_str = 'bytes={}-{}'.format(*data_byte_range)
        dres = s3.meta.client.get_object(Bucket=data_key[0],
                                         Key=data_key[1],
                                         Range=range_str)
        data_fid = open(data_filename, 'w')
        data_fid.write(dres['Body'].read())
        data_fid.close()

    input_download_time = time.time()

    print "input data download complete"

    # now split
    d = pickle.load(open(func_filename, 'r'))

    os.mkdir("/tmp/pymodules")
    # get modules and save
    for m_filename, m_text in d['module_data'].iteritems():
        m_path = os.path.dirname(m_filename)

        if len(m_path) > 0 and m_path[0] == "/":
            m_path = m_path[1:]
        to_make = os.path.join(PYTHON_MODULE_PATH, m_path)
        print "to_make=", to_make, "m_path=", m_path
        try:
            os.makedirs(to_make)
        except OSError as e:
            if e.errno == 17:
                pass
            else:
                raise e
        full_filename = os.path.join(to_make, os.path.basename(m_filename))
        print "creating", full_filename
        fid = open(full_filename, 'w')
        fid.write(m_text)
        fid.close()
    print subprocess.check_output("find {}".format(PYTHON_MODULE_PATH),
                                  shell=True)
    print subprocess.check_output("find {}".format(os.getcwd()), shell=True)

    ## Now get the runtime
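    # Stream the runtime tarball from S3 and untar it into /tmp; WrappedStreamingBody
    # exposes the response body as a file-like object that tarfile can read.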

    res = s3.meta.client.get_object(Bucket=runtime_s3_bucket,
                                    Key=runtime_s3_key)

    condatar = tarfile.open(mode="r:gz",
                            fileobj=wrenutil.WrappedStreamingBody(
                                res['Body'], res['ContentLength']))
    condatar.extractall('/tmp/')
    print "download and untar of conda runtime complete"

    cwd = os.getcwd()
    jobrunner_path = os.path.join(cwd, "jobrunner.py")

    print event
    extra_env = event.get('extra_env', {})
    extra_env['PYTHONPATH'] = "{}:{}".format(os.getcwd(), PYTHON_MODULE_PATH)

    call_id = event['call_id']
    callset_id = event['callset_id']

    print "state written to disk"

    CONDA_PYTHON_RUNTIME = "/tmp/condaruntime/bin/python"

    cmdstr = "{} {} {} {} {}".format(CONDA_PYTHON_RUNTIME, jobrunner_path,
                                     func_filename, data_filename,
                                     output_filename)

    setup_time = time.time()

    local_env = os.environ.copy()

    local_env["OMP_NUM_THREADS"] = "1"
    local_env.update(extra_env)

    print "command str=", cmdstr
    stdout = subprocess.check_output(cmdstr, shell=True, env=local_env)
    print "command executed, stdout=", stdout

    s3.meta.client.upload_file(output_filename, output_key[0], output_key[1])

    end_time = time.time()

    d = {
        'stdout': stdout,
        'call_id': call_id,
        'callset_id': callset_id,
        'start_time': start_time,
        'setup_time': setup_time - start_time,
        'exec_time': time.time() - setup_time,
        'func_key': func_key,
        'data_key': data_key,
        'output_key': output_key,
        'status_key': status_key,
        'end_time': end_time,
        'host_submit_time': event['host_submit_time'],
        'aws_request_id': context.aws_request_id,
        'log_group_name': context.log_group_name,
        'log_stream_name': context.log_stream_name,
        'server_info': server_info,
    }

    s3.meta.client.put_object(Bucket=status_key[0],
                              Key=status_key[1],
                              Body=json.dumps(d))

    return d
Example #3
def generic_handler(event, context_dict):
    """
    context_dict is generic information about the context
    that we are running in, provided by the scheduler
    """

    s3 = boto3.resource('s3')

    start_time = time.time()

    func_filename = "/tmp/func.pickle"
    data_filename = "/tmp/data.pickle"
    output_filename = "/tmp/output.pickle"

    server_info = {
        'uname': subprocess.check_output("uname -a",
                                         shell=True).decode("ascii")
    }
    if os.path.exists("/proc"):
        server_info.update({
            '/proc/cpuinfo':
            open("/proc/cpuinfo", 'r').read(),
            '/proc/meminfo':
            open("/proc/meminfo", 'r').read(),
            '/proc/self/cgroup':
            open("/proc/self/cgroup", 'r').read(),
            '/proc/cgroups':
            open("/proc/cgroups", 'r').read()
        })

    logger.info("invocation started")

    # download the input
    func_key = event['func_key']
    data_key = event['data_key']
    data_byte_range = event['data_byte_range']
    output_key = event['output_key']
    status_key = event['status_key']
    runtime_s3_bucket = event['runtime_s3_bucket']
    runtime_s3_key = event['runtime_s3_key']
    job_max_runtime = event.get("job_max_runtime", 290)  # default for lambda

    b, k = data_key
    KS = s3util.key_size(b, k)
    #logger.info("bucket=", b, "key=", k,  "status: ", KS, "bytes" )
    while KS is None:
        logger.warn("WARNING COULD NOT GET FIRST KEY")

        KS = s3util.key_size(b, k)
    if not event['use_cached_runtime']:
        subprocess.check_output("rm -Rf {}/*".format(RUNTIME_LOC), shell=True)

    # get the input and save to disk
    # FIXME: this is where we would attach the "canceled" metadata
    s3.meta.client.download_file(func_key[0], func_key[1], func_filename)
    func_download_time = time.time()
    logger.info("func download complete")

    if data_byte_range is None:
        s3.meta.client.download_file(data_key[0], data_key[1], data_filename)
    else:
        range_str = 'bytes={}-{}'.format(*data_byte_range)
        dres = s3.meta.client.get_object(Bucket=data_key[0],
                                         Key=data_key[1],
                                         Range=range_str)
        data_fid = open(data_filename, 'wb')
        data_fid.write(dres['Body'].read())
        data_fid.close()

    input_download_time = time.time()

    logger.info("input data download complete")

    # now split
    d = json.load(open(func_filename, 'r'))
    shutil.rmtree("/tmp/pymodules", True)  # delete old modules
    os.mkdir("/tmp/pymodules")
    # get modules and save
    for m_filename, m_text in d['module_data'].items():
        m_path = os.path.dirname(m_filename)

        if len(m_path) > 0 and m_path[0] == "/":
            m_path = m_path[1:]
        to_make = os.path.join(PYTHON_MODULE_PATH, m_path)
        #print "to_make=", to_make, "m_path=", m_path
        try:
            os.makedirs(to_make)
        except OSError as e:
            if e.errno == 17:
                pass
            else:
                raise e
        full_filename = os.path.join(to_make, os.path.basename(m_filename))
        #print "creating", full_filename
        fid = open(full_filename, 'w')
        fid.write(m_text)
        fid.close()
    logger.debug(
        subprocess.check_output("find {}".format(PYTHON_MODULE_PATH),
                                shell=True))
    logger.debug(
        subprocess.check_output("find {}".format(os.getcwd()), shell=True))

    ## Now get the runtime

    # res = s3.meta.client.get_object(Bucket=runtime_s3_bucket,
    #                                 Key=runtime_s3_key)

    # condatar = tarfile.open(mode= "r:gz",
    #                         fileobj = wrenutil.WrappedStreamingBody(res['Body'],
    #                                                                 res['ContentLength']))
    # condatar.extractall('/tmp/')
    # print "download and untar of conda runtime complete"

    runtime_cached = download_runtime_if_necessary(s3, runtime_s3_bucket,
                                                   runtime_s3_key)

    cwd = os.getcwd()
    jobrunner_path = os.path.join(cwd, "jobrunner.py")

    extra_env = event.get('extra_env', {})
    extra_env['PYTHONPATH'] = "{}:{}".format(os.getcwd(), PYTHON_MODULE_PATH)

    call_id = event['call_id']
    callset_id = event['callset_id']

    CONDA_PYTHON_RUNTIME = "/tmp/condaruntime/bin/python"

    cmdstr = "{} {} {} {} {}".format(CONDA_PYTHON_RUNTIME, jobrunner_path,
                                     func_filename, data_filename,
                                     output_filename)

    setup_time = time.time()

    local_env = os.environ.copy()

    local_env["OMP_NUM_THREADS"] = "1"
    local_env.update(extra_env)

    logger.debug("command str=%s", cmdstr)
    # This is copied from http://stackoverflow.com/a/17698359/4577954
    # reasons for setting process group: http://stackoverflow.com/a/4791612
    process = subprocess.Popen(cmdstr,
                               shell=True,
                               env=local_env,
                               bufsize=1,
                               stdout=subprocess.PIPE,
                               preexec_fn=os.setsid)

    def consume_stdout(stdout, queue):
        with stdout:
            for line in iter(stdout.readline, b''):
                queue.put(line)

    q = Queue()

    t = Thread(target=consume_stdout, args=(process.stdout, q))
    t.daemon = True
    t.start()

    stdout = b""
    process_timeout_killed = False
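    # Relay the child's output and enforce job_max_runtime; on timeout the whole process
    # group is killed and process_timeout_killed is recorded in the status.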
    while t.is_alive():
        try:
            line = q.get_nowait()
            stdout += line
            logger.info(line)
        except Empty:
            time.sleep(PROCESS_STDOUT_SLEEP_SECS)
        total_runtime = time.time() - start_time
        if total_runtime > job_max_runtime:
            logger.warn("Process exceeded maximum runtime of {} sec".format(
                job_max_runtime))
            # Send the signal to all the process groups
            os.killpg(os.getpgid(process.pid), signal.SIGTERM)
            process_timeout_killed = True
            # create dummy file for upload

    logger.info("command execution finished")

    s3.meta.client.upload_file(output_filename, output_key[0], output_key[1])
    logger.debug("output uploaded to %s %s", output_key[0], output_key[1])

    end_time = time.time()

    d = {
        'stdout': stdout.decode("ascii"),
        'call_id': call_id,
        'process_timeout_killed': process_timeout_killed,
        'callset_id': callset_id,
        'start_time': start_time,
        'setup_time': setup_time - start_time,
        'exec_time': time.time() - setup_time,
        'func_key': func_key,
        'data_key': data_key,
        'output_key': output_key,
        'status_key': status_key,
        'end_time': end_time,
        'runtime_cached': runtime_cached,
        'host_submit_time': event['host_submit_time'],
        'server_info': server_info,
    }
    d.update(context_dict)

    s3.meta.client.put_object(Bucket=status_key[0],
                              Key=status_key[1],
                              Body=json.dumps(d))

    return d
Example #4
def generic_handler(event, context_dict):
    """
    context_dict is generic information about the context
    that we are running in, provided by the scheduler
    """

    s3 = boto3.resource('s3')

    start_time = time.time()

    func_filename = "/tmp/func.pickle"
    data_filename = "/tmp/data.pickle"
    output_filename = "/tmp/output.pickle"

    server_info = {
        '/proc/cpuinfo': open("/proc/cpuinfo", 'r').read(),
        '/proc/meminfo': open("/proc/meminfo", 'r').read(),
        '/proc/self/cgroup': open("/proc/self/cgroup", 'r').read(),
        '/proc/cgroups': open("/proc/cgroups", 'r').read()
    }

    logger.info("invocation started")

    # download the input
    func_key = event['func_key']
    data_key = event['data_key']
    data_byte_range = event['data_byte_range']
    output_key = event['output_key']
    status_key = event['status_key']
    runtime_s3_bucket = event['runtime_s3_bucket']
    runtime_s3_key = event['runtime_s3_key']

    b, k = data_key
    KS = s3util.key_size(b, k)
    #logger.info("bucket=", b, "key=", k,  "status: ", KS, "bytes" )
    while KS is None:
        logger.warn("WARNING COULD NOT GET FIRST KEY")

        KS = s3util.key_size(b, k)
    if not event['use_cached_runtime']:
        subprocess.check_output("rm -Rf {}/*".format(RUNTIME_LOC), shell=True)

    # get the input and save to disk
    # FIXME: this is where we would attach the "canceled" metadata
    s3.meta.client.download_file(func_key[0], func_key[1], func_filename)
    func_download_time = time.time()
    logger.info("func download complete")

    if data_byte_range is None:
        s3.meta.client.download_file(data_key[0], data_key[1], data_filename)
    else:
        range_str = 'bytes={}-{}'.format(*data_byte_range)
        dres = s3.meta.client.get_object(Bucket=data_key[0],
                                         Key=data_key[1],
                                         Range=range_str)
        data_fid = open(data_filename, 'w')
        data_fid.write(dres['Body'].read())
        data_fid.close()

    input_download_time = time.time()

    logger.info("input data download complete")

    # now split
    d = pickle.load(open(func_filename, 'r'))
    shutil.rmtree("/tmp/pymodules", True)  # delete old modules
    os.mkdir("/tmp/pymodules")
    # get modules and save
    for m_filename, m_text in d['module_data'].iteritems():
        m_path = os.path.dirname(m_filename)

        if len(m_path) > 0 and m_path[0] == "/":
            m_path = m_path[1:]
        to_make = os.path.join(PYTHON_MODULE_PATH, m_path)
        #print "to_make=", to_make, "m_path=", m_path
        try:
            os.makedirs(to_make)
        except OSError as e:
            if e.errno == 17:
                pass
            else:
                raise e
        full_filename = os.path.join(to_make, os.path.basename(m_filename))
        #print "creating", full_filename
        fid = open(full_filename, 'w')
        fid.write(m_text)
        fid.close()
    logger.debug(
        subprocess.check_output("find {}".format(PYTHON_MODULE_PATH),
                                shell=True))
    logger.debug(
        subprocess.check_output("find {}".format(os.getcwd()), shell=True))

    ## Now get the runtime

    # res = s3.meta.client.get_object(Bucket=runtime_s3_bucket,
    #                                 Key=runtime_s3_key)

    # condatar = tarfile.open(mode= "r:gz",
    #                         fileobj = wrenutil.WrappedStreamingBody(res['Body'],
    #                                                                 res['ContentLength']))
    # condatar.extractall('/tmp/')
    # print "download and untar of conda runtime complete"

    runtime_cached = download_runtime_if_necessary(s3, runtime_s3_bucket,
                                                   runtime_s3_key)

    cwd = os.getcwd()
    jobrunner_path = os.path.join(cwd, "jobrunner.py")

    extra_env = event.get('extra_env', {})
    extra_env['PYTHONPATH'] = "{}:{}".format(os.getcwd(), PYTHON_MODULE_PATH)

    call_id = event['call_id']
    callset_id = event['callset_id']

    CONDA_PYTHON_RUNTIME = "/tmp/condaruntime/bin/python"

    cmdstr = "{} {} {} {} {}".format(CONDA_PYTHON_RUNTIME, jobrunner_path,
                                     func_filename, data_filename,
                                     output_filename)

    setup_time = time.time()

    local_env = os.environ.copy()

    local_env["OMP_NUM_THREADS"] = "1"
    local_env.update(extra_env)

    logger.debug("command str=%s", cmdstr)
    # This is copied from http://stackoverflow.com/a/17698359/4577954
    process = subprocess.Popen(cmdstr,
                               shell=True,
                               env=local_env,
                               bufsize=1,
                               stdout=subprocess.PIPE)
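    # Read the child's stdout line by line and relay it to the log until the pipe closes;
    # the process is then reaped with wait() below.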
    stdout = ''
    with process.stdout:
        for line in iter(process.stdout.readline, b''):
            stdout += line
            logger.info(line)

    # TODO(shivaram): It looks like the deadlock warning in subprocess should not apply here
    # as we drain the stdout before calling wait ?
    process.wait()
    logger.info("command execution finished")

    s3.meta.client.upload_file(output_filename, output_key[0], output_key[1])
    logger.debug("output uploaded to %s %s", output_key[0], output_key[1])

    end_time = time.time()

    d = {
        'stdout': stdout,
        'call_id': call_id,
        'callset_id': callset_id,
        'start_time': start_time,
        'setup_time': setup_time - start_time,
        'exec_time': time.time() - setup_time,
        'func_key': func_key,
        'data_key': data_key,
        'output_key': output_key,
        'status_key': status_key,
        'end_time': end_time,
        'runtime_cached': runtime_cached,
        'host_submit_time': event['host_submit_time'],
        'server_info': server_info,
    }
    d.update(context_dict)

    s3.meta.client.put_object(Bucket=status_key[0],
                              Key=status_key[1],
                              Body=json.dumps(d))

    return d