Example #1
def common_prepare():
    # start from a clean slate
    rmtree(internal_path('work/compile'), ignore_errors=True)

    # recreate the compilation input/output/work tree
    makedirs(internal_path('work/compile/in'))
    makedirs(internal_path('work/compile/out'))
    makedirs(internal_path('work/compile/work'))

    # hand the tree over to the user/group the container runs as
    chown_recursive(internal_path('work/compile'), DOCKER_CONTAINER_USER,
                    DOCKER_CONTAINER_GROUP)
Example #2
def copy_data_directory(pack, test_unit):
    # if the package ships a data directory for this test, swap it in
    if path.exists(path.join(pack.path, 'data', test_unit.name)):
        rmtree(internal_path('work/run/data'))
        copytree(path.join(pack.path, 'data', test_unit.name),
                 internal_path('work/run/data'))

    # make the data directory and everything inside it accessible
    # to the container user
    chmod(internal_path('work/run/data'), 0o777)

    for root, dirs, files in walk(internal_path('work/run/data')):
        for entry in dirs:
            chmod(path.join(root, entry), 0o777)
        for entry in files:
            chmod(path.join(root, entry), 0o666)
Example #3
def common_binds():
    return {
        internal_path("work/compile/in"): {
            "bind": "/mnt/in",
            "mode": "ro"
        },
        internal_path("work/compile/work"): {
            "bind": "/mnt/work",
            "mode": "rw"
        },
        internal_path("work/compile/out"): {
            "bind": "/mnt/out",
            "mode": "rw"
        }
    }
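
A mapping of this shape is what docker-py's low-level APIClient accepts as the binds argument of create_host_config. A minimal sketch of how it could be wired up, assuming docker-py is in use; the image name and command are hypothetical:

import docker

docker_cli = docker.APIClient()  # low-level client, matching docker_cli elsewhere

binds = common_binds()
host_config = docker_cli.create_host_config(binds=binds)

# the container-side paths double as the volume declarations
container = docker_cli.create_container(
    image='compiler-image:latest',                  # hypothetical image
    command='gcc /mnt/in/main.c -o /mnt/out/prog',  # hypothetical command
    volumes=[b['bind'] for b in binds.values()],
    host_config=host_config,
)
docker_cli.start(container)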
Example #4
def do_wait(runner_conf, container, max_time=None):
    real_location = runner_conf['location']

    # wait (timeout + 0.5) seconds for the runner to indicate that the work
    # is finished; if it does not finish within that time, we destroy the
    # container and report that this test hit "hard_timeout"
    limit_sec = (float(runner_conf['limits']['timeout']) / 1000.0) + 0.5

    if max_time:
        limit_sec = min(limit_sec, max_time)

    test_finished = file_spinlock(
        internal_path(os.path.join(real_location, 'out/finished')), limit_sec)

    # collect the container's statistics before stopping it
    # (once it is stopped, Docker no longer allows collecting stats)
    try:
        stats = quickly_get_stats(container)
    except QuickStatsNotAvailable:
        stats = docker_cli.stats(container=container,
                                 decode=True,
                                 stream=False)

    stdout, stderr = destroy_container(container)
    return make_result(runner_conf, stdout, stderr, stats, test_finished)
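
file_spinlock itself is not shown on this page; judging from its call sites, it polls for a marker file until a deadline and returns whether the file appeared in time. A minimal sketch under that assumption:

import os
import time

def file_spinlock(marker_path, limit_sec, poll_interval=0.005):
    # Poll for marker_path until limit_sec elapses. Returns True as soon as
    # the file exists, False if the deadline passes without it appearing.
    # (Sketch only -- the real helper is project code.)
    deadline = time.monotonic() + limit_sec
    while time.monotonic() < deadline:
        if os.path.exists(marker_path):
            return True
        time.sleep(poll_interval)
    return os.path.exists(marker_path)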
Example #5
def download_package_from_url(url, dest):
    logging.info('Attempting to download missing package from: ' + url)
    tmp_path = internal_path("work/dl_package.zip")

    try:
        req = requests.get(url, stream=True, timeout=10)

        if req.status_code == 200:
            with open(tmp_path, 'wb') as tmp_file:
                req.raw.decode_content = True
                shutil.copyfileobj(req.raw, tmp_file)
        else:
            raise PackageLoadError(
                'Package download failed, server said: {} {}'.format(
                    req.status_code, req.reason))
    except (RuntimeError, IOError) as e:
        raise PackageLoadError(
            'Package download failed due to an error') from e

    logging.info('Extracting package...')
    try:
        with ZipFile(tmp_path, "r") as z:
            z.extractall(dest)
    except BadZipfile as e:
        raise PackageLoadError('Malformed package zip file') from e

    os.remove(tmp_path)
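
PackageLoadError is project-specific and not shown here; presumably it is a plain Exception subclass along these lines:

class PackageLoadError(Exception):
    """Raised when a package cannot be downloaded, extracted or parsed."""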
Example #6
def do_run_test(submission, env_conf, pack, test_unit):
    runner, runner_conf = plugin_loader.get('runners',
                                            pack.config['runner']['name'],
                                            pack.config['runner'])

    runner.do_prepare(runner_conf)
    # feed in everything that the compilation step produced
    rmtree(internal_path('work/run/in'), ignore_errors=True)
    copytree(internal_path('work/compile/out'), internal_path('work/run/in'))

    copy_data_directory(pack, test_unit)

    # provide the test's input file to the runner
    copy(test_unit.runner_meta['input_file'], internal_path('work/run/in/input.txt'))

    prog_container = runner.do_run(runner_conf)
    exc_res = runner.do_wait(runner_conf, prog_container)
    cmp_res = compare(internal_path('work/run/out/output.txt'),
                      test_unit.runner_meta['output_file'])

    try:
        if test_unit.runner_meta['options']['store_output'] != 'none':
            logging.info('Storing output for the test {}'.format(test_unit.name))
            # upload the program's actual output (the expected output file is
            # already part of the package)
            with open(internal_path('work/run/out/output.txt'), 'r',
                      encoding='utf-8') as output_file:
                task_queue.upload_test_output(submission['uuid'], test_unit.name,
                                              output_file.read(),
                                              test_unit.runner_meta['store_output'])
    except KeyError:
        pass

    points = 0.0

    try:
        max_points = test_unit.runner_meta['options']['points']
    except KeyError:
        max_points = 1.0

    if exc_res.status != 'ok':
        status = exc_res.status
    elif not cmp_res:
        status = 'bad_answer'
    else:
        status = 'ok'
        points = max_points

    return TestStatus(name=test_unit.name, status=status, time=exc_res.exec_time,
                      timeout=exc_res.timeout, memory=exc_res.memory, points=points, max_points=max_points)
Example #7
def prune_unused_packages():
    packages_dir = internal_path('packages')

    # if package cache doesn't exist yet then we don't care
    if not os.path.isdir(packages_dir):
        return

    for entry in os.listdir(packages_dir):
        package_path = path_join(packages_dir, entry)

        # drop packages whose mtime has not been refreshed for over an hour
        if getmtime(package_path) + 60 * 60 < time():
            logging.info('Removing unused package: {}'.format(entry))
            shutil.rmtree(package_path)
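
Note that get_package (Example #11) refreshes a cached package's mtime via os.utime on every hit, so only packages that no submission has requested for over an hour are actually pruned here.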
Example #8
def do_run(runner_conf, additional_binds=None):
    real_location = runner_conf['location']

    container = make_container(runner_conf['image'], command(),
                               binds(real_location, additional_binds),
                               host_config(runner_conf))
    docker_cli.start(container)

    # wait until runner says it's ready
    if not file_spinlock(
            internal_path(os.path.join(real_location, 'out/ready')), 1.0):
        raise RuntimeError(
            'Runner\'s wrapper script did not release the "ready" lock within 1 second.'
        )

    # merely initializing a Docker container pushes the peak memory usage
    # counter to at least 7-10 MB; reset it to zero once the container has
    # initialized so that the measurement is more reliable
    reset_memory_peak(container)

    # touch the "ready_ok" file to tell the runner it can begin the test
    open(internal_path(os.path.join(real_location, 'in/ready_ok')),
         'w').close()
    return container
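
reset_memory_peak is also project code that is not shown here. With cgroup v1, the kernel resets the peak counter when anything is written to memory.max_usage_in_bytes; a sketch assuming the cgroupfs driver and the default /sys/fs/cgroup layout:

def reset_memory_peak(container):
    # assumption: cgroup v1 with the cgroupfs driver, so the container's
    # memory controller lives under /sys/fs/cgroup/memory/docker/<id>;
    # writing to max_usage_in_bytes resets the peak to the current usage
    container_id = container['Id'] if isinstance(container, dict) else container
    cgroup_file = ('/sys/fs/cgroup/memory/docker/{}/'
                   'memory.max_usage_in_bytes'.format(container_id))
    with open(cgroup_file, 'w') as f:
        f.write('0')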
Example #9
def binds(real_location, additional_binds=None):
    base_binds = {
        internal_path(os.path.join(real_location, "scripts")): {
            "bind": "/mnt/scripts",
            "mode": "ro"
        },
        internal_path(os.path.join(real_location, "in")): {
            "bind": "/mnt/in",
            "mode": "ro"
        },
        internal_path(os.path.join(real_location, "out")): {
            "bind": "/mnt/out",
            "mode": "rw"
        },
        internal_path(os.path.join(real_location, "data")): {
            "bind": "/mnt/data",
            "mode": "rw"
        }
    }

    if additional_binds:
        base_binds.update(additional_binds)

    return base_binds
Example #10
def save_leak_report(container):
    inspect_data = json.dumps(docker_cli.inspect_container(container), indent=4, separators=(',', ': '))
    logs = docker_cli.logs(container, stdout=True, stderr=True, stream=False, timestamps=True).decode('utf-8')

    makedirs(internal_path('error_report'), exist_ok=True)

    now = datetime.datetime.now()

    report_name = 'container_leak_{}_{}.log'.format(now.strftime('%Y%m%d_%H%M%S'), container)
    full_report_path = internal_path(path_join('error_report', report_name))

    with open(full_report_path, 'w') as report_f:
        report_f.write('Report about leaked container {}\n'.format(container))
        report_f.write('Generated on: {}\n'.format(now.isoformat()))
        report_f.write('\n=== Inspection data (length: {}) ===\n'.format(len(inspect_data)))
        report_f.write(inspect_data)
        report_f.write('\n=== Container logs (length: {}) ===\n'.format(len(logs)))
        report_f.write(logs)
        report_f.write('\n=== End of report ===\n')

    return full_report_path
Example #11
def get_package(name, version, url=None):
    """
    Fetch package with matching name and version.
    If it is not possible, package will be fetched from given url.
    """
    prune_unused_packages()

    file_name = name + "-v" + str(version)
    path = internal_path(os.path.join('packages', file_name))

    if not os.path.isdir(path):
        if url:
            download_package_from_url(url, path)
        else:
            task_queue.download_package(name, version, path)
    else:
        os.utime(path, None)

    yml_file = os.path.join(path, 'config.yml')
    json_file = os.path.join(path, 'config.json')

    if os.path.exists(yml_file):
        config_format = 'yml'
        config_fname = yml_file
    elif os.path.exists(json_file):
        config_format = 'json'
        config_fname = json_file
    else:
        raise PackageLoadError(
            'No configuration file found inside package, tried config.yml and config.json.'
        )

    config = None

    with open(config_fname, 'r') as config_file:
        try:
            if config_format == 'yml':
                # safe_load avoids constructing arbitrary Python objects
                config = yaml.safe_load(config_file)
            elif config_format == 'json':
                config = json.load(config_file)
            else:
                raise RuntimeError(
                    'Invalid value provided in `config_format`.')
        except (IOError, ValueError, yaml.YAMLError,
                json.JSONDecodeError) as e:
            raise PackageLoadError('Failed to load package config file') from e

    return Package(file_name, path, raw_config=config, config=None)
Example #12
def do_prepare(runner_conf):
    real_location = runner_conf['location']

    rmtree(internal_path(real_location), ignore_errors=True)

    # create working directory "data"
    makedirs(internal_path(os.path.join(real_location, 'data')))

    # create the directory for runner's input and output
    makedirs(internal_path(os.path.join(real_location, 'in')))
    makedirs(internal_path(os.path.join(real_location, 'out')))

    # create the directory for all necessary scripts
    makedirs(internal_path(os.path.join(real_location, 'scripts')))
    # copy the wrapper script that will be used as the container's start command
    copy(wrapper_path,
         internal_path(os.path.join(real_location, 'scripts/run.sh')))

    # make sure the container's start command is executable
    chmod(internal_path(os.path.join(real_location, 'scripts/run.sh')), 0o500)
    # ensure that exec_user will have the proper permissions
    chown_recursive(internal_path(real_location), DOCKER_CONTAINER_USER,
                    DOCKER_CONTAINER_GROUP)
Example #13
def pick_compilation_units(extensions):
    # yield the container-side path of every input file matching one of the
    # given extensions; endswith with a tuple also prevents yielding a file
    # twice when it matches more than one extension
    for project_file in listdir(internal_path("work/compile/in")):
        if project_file.endswith(tuple(extensions)):
            yield path_join('/mnt/in', project_file)
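
A caller would typically splice the yielded container-side paths straight into the compile command; the compiler and flags below are purely illustrative:

sources = list(pick_compilation_units(['.c']))
# e.g. ['/mnt/in/main.c', '/mnt/in/util.c'] -- paths as seen inside the container
compile_cmd = ['gcc', '-O2', '-o', '/mnt/out/prog'] + sources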
Example #14
def do_run_test(submission, env_conf, pack, test_unit):
    runner, runner_conf = plugin_loader.get('runners',
                                            pack.config['runner']['name'],
                                            pack.config['runner'])
    runner_conf['location'] = 'work/run'

    # configure locations of the runners
    srv_runner, srv_runner_conf = plugin_loader.get(
        'runners', pack.config['service_runner']['name'],
        pack.config['service_runner'])
    srv_runner_conf['location'] = 'work/srv'

    srv_runner.do_prepare(srv_runner_conf)

    runner.do_prepare(runner_conf)
    # feed in everything that the compilation step produced
    rmtree(internal_path('work/run/in'), ignore_errors=True)
    copytree(internal_path('work/compile/out'),
             internal_path('work/run/in'))

    common.copy_data_directory(pack, test_unit)

    # upload the service program
    rmtree(internal_path('work/srv/in'), ignore_errors=True)
    copytree(path.join(pack.path, 'service'),
             internal_path('work/srv/in'))
    chmod(internal_path('work/srv/in/prog'), 0o777)

    # provide the test's input file to the service
    copy(test_unit.runner_meta['input_file'],
         internal_path('work/srv/in/input.txt'))

    # create pipes which will be used for communication
    mkfifo(internal_path('work/run/in/input.txt'))
    mkfifo(internal_path('work/run/out/output.txt'))
    chmod(internal_path('work/run/in/input.txt'), 0o777)
    chmod(internal_path('work/run/out/output.txt'), 0o777)

    srv_container = srv_runner.do_run(
        srv_runner_conf,
        additional_binds={
            internal_path(os.path.join("work/run/in")): {
                "bind": "/mnt/prog-in",
                "mode": "rw"
            },
            internal_path(os.path.join("work/run/out")): {
                "bind": "/mnt/prog-out",
                "mode": "rw"
            },
        })

    exc_container = runner.do_run(runner_conf)

    # wait until the service is ready
    if not file_spinlock(internal_path('work/srv/out/srv-ready'), 1.0):
        raise RuntimeError('Service did not start within 1 second.')

    # run user's program
    # TODO context manager for containers?
    exc_res = runner.do_wait(runner_conf, exc_container)

    if exc_res.status in ['soft_timeout', 'hard_timeout']:
        # TODO maybe some better solution?
        srv_runner.do_wait(srv_runner_conf, srv_container, max_time=0.0)
        return TestStatus(name=test_unit.name,
                          status='hard_timeout',
                          time=exc_res.exec_time,
                          timeout=exc_res.timeout,
                          points=0,
                          max_points=1)

    svc_res = srv_runner.do_wait(srv_runner_conf, srv_container, max_time=1.0)

    if svc_res.status == 'bad_exit_code':
        # something went wrong with the service, internal error
        raise RuntimeError('Service crashed.')
    elif svc_res.status in ['soft_timeout', 'hard_timeout']:
        return TestStatus(name=test_unit.name,
                          status='hard_timeout',
                          time=svc_res.exec_time,
                          timeout=svc_res.timeout,
                          points=0,
                          max_points=1)

    with open(internal_path('work/srv/out/output.txt'), 'r',
              encoding='utf-8') as output_file:
        out_data = json.loads(output_file.read())

    try:
        if test_unit.runner_meta['options']['store_output'] != 'none':
            logging.info('Storing output for the test {}'.format(
                test_unit.name))
            task_queue.upload_test_output(
                submission['uuid'], test_unit.name, out_data['message'],
                test_unit.runner_meta['store_output'])
    except KeyError:
        pass

    if exc_res.status != 'ok':
        status = exc_res.status
    else:
        status = 'ok'

    points = out_data['points']
    max_points = out_data['max_points']

    return TestStatus(name=test_unit.name,
                      status=status,
                      time=exc_res.exec_time,
                      timeout=exc_res.timeout,
                      memory=exc_res.memory,
                      points=points,
                      max_points=max_points)