Example #1
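# Standard-library and third-party imports used by this handler; WRITE_DIR,
# DATABASE_SETTINGS, SECOND_TABLE, Map, make_container and format_response are
# assumed to be defined elsewhere in the source module.
import os
import subprocess

import boto3
import pg8000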
def makeIntersectionGif(event, context):
    # queryStringParameters can be None on a Lambda proxy request with no
    # query string, so fall back to an empty dict.
    data = event.get('queryStringParameters') or {}
    # Keep the id as a string so it can be passed to the shell script and used
    # directly in file names.
    id = str(data.get('id', 0))

    m = Map(float(data.get('lat', 39.82)), float(data.get('lon', -98.58)))
    m.create()

    output = subprocess.call([
        './gifcreate.sh',
        data.get('nm1', ''),
        data.get('nm2', ''),
        data.get('loc', ''), id
    ])

    filename = "{}/{}.mp4".format(WRITE_DIR, id)
    gifname = "{}/{}.gif".format(WRITE_DIR, id)

    # Upload the HTML container page, video and GIF to S3
    s3 = boto3.resource('s3')
    s3.Object('container.crossing.us',
              '{}.html'.format(id)).put(Body=make_container(id),
                                        ContentType='text/html')
    with open(filename, 'rb') as video_file:
        s3.Object('video.crossing.us',
                  '{}.mp4'.format(id)).put(Body=video_file,
                                           ContentType='video/mp4')
    with open(gifname, 'rb') as gif_file:
        s3.Object('gif.crossing.us',
                  '{}.gif'.format(id)).put(Body=gif_file,
                                           ContentType='image/gif')

    conn = pg8000.connect(**DATABASE_SETTINGS)
    cur = conn.cursor()

    # SECOND_TABLE is a trusted module-level constant, so interpolating it into
    # the statement is safe; the id value itself is passed as a parameter.
    cur.execute(
        "INSERT INTO {} VALUES (%s);".format(SECOND_TABLE), [id])
    conn.commit()

    cur.close()
    conn.close()

    os.remove(filename)
    os.remove(gifname)

    return format_response(200, {})
Example #2
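# os and copy (presumably shutil.copy) are needed below; internal_path,
# make_container, common_binds, common_host_config, common_compile and
# image_name are assumed to come from the surrounding module.
import os
from shutil import copy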
def do_compile(compiler_conf, pack):
    with open(internal_path('work/compile/work/opt/comp_opt'), 'w') as f:
        f.write(compiler_conf['command_line'])

    with open(internal_path('work/compile/work/opt/inject_comp_opt'),
              'w') as f:
        f.write(compiler_conf['inject_command_line'])

    with open(internal_path('work/compile/work/opt/link_opt'), 'w') as f:
        f.write(compiler_conf['link_command_line'])

    with open(internal_path('work/compile/work/opt/strip_opt'), 'w') as f:
        f.write(compiler_conf['strip_command_line'])

    for fname in compiler_conf['inject_files']:
        copy(os.path.join(pack.path, fname),
             internal_path(os.path.join('work/compile/work/inject', fname)))

    container = make_container(image_name,
                               ['/mnt/work/gcc.sh'], common_binds(),
                               common_host_config(compiler_conf))
    # TODO check exit code to determine possible errors
    return common_compile(container, compiler_conf)
Example #3
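# os is needed for the path joins; internal_path, make_container, command,
# binds, host_config, docker_cli, file_spinlock and reset_memory_peak are
# assumed to come from the surrounding module.
import os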
def do_run(runner_conf, additional_binds=None):
    real_location = runner_conf['location']

    container = make_container(runner_conf['image'], command(),
                               binds(real_location, additional_binds),
                               host_config(runner_conf))
    docker_cli.start(container)

    # wait until runner says it's ready
    if not file_spinlock(
            internal_path(os.path.join(real_location, 'out/ready')), 1.0):
        raise RuntimeError(
            'Runner\'s wrapper script did not unlock "ready" lock within 1 second.'
        )

    # Merely initializing the Docker container pushes the peak memory usage
    # indicator to at least 7-10 MB, so we reset the counter after the
    # container has initialized to get a more reliable measurement.
    reset_memory_peak(container)

    # tell runner that it can begin the test
    open(internal_path(os.path.join(real_location, 'in/ready_ok')),
         'w').close()
    return container
Example #4
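# json and logging are needed below; the Docker helpers (make_container,
# docker_cli, destroy_container, inspect_container, file_spinlock,
# internal_path, binds, common_host_config), RunnerConfigurationError and
# ExecStatus are assumed to come from the surrounding module.
import json
import logging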
def do_run(runner_conf, test_unit, pack):
    prog_container = make_container(prog_image_name,
                                    ['/mnt/scripts/run-test.sh'], binds(),
                                    common_host_config(runner_conf))
    service_container = make_container(runner_conf['service']['image'],
                                       ['/mnt/scripts/run-service.sh'],
                                       binds(),
                                       common_host_config(runner_conf))

    docker_cli.start(service_container)
    docker_cli.start(prog_container)

    limit_sec = (float(runner_conf['limits']['timeout']) / 1000.0) + 0.5
    test_finished = file_spinlock(internal_path('work/run/in/finished'),
                                  limit_sec)
    service_finished = file_spinlock(internal_path('work/run/in/svc-finished'),
                                     1.0)

    service_info = inspect_container(service_container)

    serv_stdout, serv_stderr = destroy_container(service_container)
    prog_stdout, prog_stderr = destroy_container(prog_container)

    error_reason = None

    if (not service_info['State']['Running']
            and service_info['State']['ExitCode'] != 0):
        error_reason = "service crashed (exit code: {})".format(
            service_info['State']['ExitCode'])
    elif serv_stderr:
        error_reason = "stderr of the service is not empty"

    if error_reason:
        try:
            with open(internal_path('work/run/service/svc-output.txt'),
                      'r') as f:
                result_file_content = f.read()
        except IOError:
            result_file_content = "(failed to load)"

        raise RunnerConfigurationError(
            'Service container failed. Exit code: {}\n\n'
            '--- SERVICE CONTAINER STDOUT ---\n{}\n\n'
            '--- SERVICE CONTAINER STDERR ---\n{}\n\n'
            '--- SERVICE CONTAINER RESULT FILE ---\n{}'.format(
                service_info['State']['ExitCode'], serv_stdout, serv_stderr,
                result_file_content))

    logging.info('Prog stdout: {}'.format(prog_stdout))
    logging.info('Prog stderr: {}'.format(prog_stderr))
    logging.info('Serv stdout: {}'.format(serv_stdout))
    logging.info('Serv stderr: {}'.format(serv_stderr))

    if not test_finished or not service_finished:
        return ExecStatus('hard_timeout',
                          timeout=runner_conf['limits']['timeout'],
                          exec_time=runner_conf['limits']['timeout'] + 500)

    data = json.loads(prog_stdout)

    status = 'ok'

    if data['exit_code'] != 0:
        status = 'bad_exit_code'
    elif int(data['exec_time']) > runner_conf['limits']['timeout']:
        status = 'soft_timeout'

    return ExecStatus(status, runner_conf['limits']['timeout'],
                      data['exit_code'], data['exec_time'], None,
                      internal_path('work/run/service/svc-output.txt'))
Example #5
def do_run(runner_conf, test_unit, pack):
    container = make_container(image_name, common_command(), common_binds(),
                               common_host_config(runner_conf))
    return common_run(container, runner_conf)
Example #6
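# json and logging are needed below; the network and Docker helpers
# (create_network, make_container, get_endpoint_config, docker_cli,
# file_spinlock, reset_memory_peak, quickly_get_stats, destroy_container,
# internal_path), QuickStatsNotAvailable and ExecStatus are assumed to come
# from the surrounding module.
import json
import logging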
def do_run(runner_conf, test_unit, pack):
    network_id = create_network()

    srv_container = make_container(
        runner_conf['server']['image'],
        ['/mnt/scripts/run-server.sh'],
        server_binds(),
        common_host_config(runner_conf),
        networking_config=get_endpoint_config('server')
    )

    docker_cli.start(srv_container)

    cli_container = make_container(
        runner_conf['client']['image'],
        ['/mnt/scripts/run-client.sh'],
        client_binds(),
        common_host_config(runner_conf),
        networking_config=get_endpoint_config('client')
    )

    docker_cli.start(cli_container)

    if not file_spinlock(internal_path('work/run/client/ready'), 1.0):
        raise RuntimeError('Client\'s wrapper script did not unlock "ready" lock within 1 second.')

    reset_memory_peak(cli_container)

    if not file_spinlock(internal_path('work/run/server/ready'), 1.0):
        raise RuntimeError('Server\'s wrapper script did not unlock "ready" lock within 1 second.')

    open(internal_path('work/run/client/ready_ok'), 'w').close()

    test_limit_sec = (float(runner_conf['limits']['timeout']) / 1000.0) + 0.5
    test_finished = file_spinlock(internal_path('work/run/client/finished'), test_limit_sec)

    server_finished = file_spinlock(internal_path('work/run/server/finished'), 1.0)

    try:
        stats = quickly_get_stats(cli_container)
    except QuickStatsNotAvailable:
        stats = docker_cli.stats(container=cli_container, decode=True, stream=False)

    used_memory = stats['memory_stats']['max_usage']

    srv_stdout, srv_stderr = destroy_container(srv_container)
    cli_stdout, cli_stderr = destroy_container(cli_container)
    docker_cli.remove_network(network_id)
    del network_id

    logging.info('Cli stdout: {}'.format(cli_stdout))
    logging.info('Cli stderr: {}'.format(cli_stderr))
    logging.info('Srv stdout: {}'.format(srv_stdout))
    logging.info('Srv stderr: {}'.format(srv_stderr))

    if not test_finished:
        return ExecStatus('hard_timeout', timeout=runner_conf['limits']['timeout'],
                          exec_time=runner_conf['limits']['timeout'] + 500)

    if not server_finished:
        logging.warning('Server failed to finish correctly')
        # TODO define behaviour in case of server failing to exit

    # TODO define in config which output to store (server and/or client and/or with/without error)

    fnames = [internal_path('work/run/client/output.txt'), internal_path('work/run/client/error.txt'),
              internal_path('work/run/server/output.txt'), internal_path('work/run/server/error.txt')]

    with open(internal_path('work/run/common-output.txt'), 'w') as co:
        for file in fnames:
            with open(file, 'r') as f:
                co.write("file: {}\n\n".format(file))
                co.write(f.read())

    try:
        # TODO detect malformed JSON, bad exit code, timeout etc.
        data = json.loads(cli_stdout)
    except ValueError:
        # FOR DEBUG:
        return ExecStatus('debug', 4000, '0', '0', used_memory, internal_path('work/run/common-output.txt'))
        # raise RuntimeError('Unable to decode the output from stdout')

    status = 'ok'

    if data['exit_code'] != 0:
        status = 'bad_exit_code'

    return ExecStatus(status, runner_conf['limits']['timeout'], data['exit_code'], data['exec_time'], used_memory,
                      internal_path('work/run/common-output.txt'))
Example #7
def do_compile(package_config):
    container = make_container(image_name, make_command(), common_binds(),
                               common_host_config(package_config))
    return common_compile(container, package_config)