Example #1
def generate_corpus(experiment, seed, buffer_size, count):
    random.seed(seed)

    try:
        os.unlink(os.path.join(CORPORA, experiment + '.tar'))
    except FileNotFoundError:
        pass

    shutil.rmtree(os.path.join(CORPORA, experiment), ignore_errors=True)
    mkdirp(os.path.join(CORPORA, experiment))

    experiment = EXPERIMENTS[experiment]

    completed = 0
    while completed < count:
        try:
            data = ConjectureData(
                draw_bytes=lambda data, n: uniform(random, n),
                max_length=buffer_size,
            )
            gen = experiment.generator(data)
            info = experiment.calculate_info(gen)
            # error_pred is never used below; computing it just checks that
            # the buffer decodes to a valid test case.
            error_pred = experiment.calculate_error_predicate(info)
        except StopTest:
            # Typically the generator ran off the end of the buffer;
            # draw a fresh one.
            continue
        except Exception:
            # Any other failure just means this buffer is unusable as a seed.
            continue
        print(info)
        name = hashlib.sha1(data.buffer).hexdigest()[:16]
        with open(os.path.join(CORPORA, experiment.name, name), "wb") as o:
            o.write(data.buffer)
        completed += 1

    subprocess.check_call(["apack", experiment.name + ".tar", experiment.name], cwd=CORPORA)
    shutil.rmtree(os.path.join(CORPORA, experiment.name))
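
generate_corpus leans on two helpers that are not shown in this excerpt. A minimal sketch of what they plausibly look like, assuming mkdirp is the usual makedirs wrapper and uniform draws n uniformly random bytes from the given Random instance (both bodies are assumptions, not the source's code):

import os


def mkdirp(path):
    # Create path and any missing parents; do nothing if it already exists.
    os.makedirs(path, exist_ok=True)


def uniform(rnd, n):
    # n bytes drawn uniformly at random from the Random instance rnd.
    return bytes(rnd.randrange(256) for _ in range(n))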
Example #2
    def accept(fn):
        cachedir = os.path.join(WORKING, "cache", fn.__name__)
        mkdirp(cachedir)

        def cached_fn(*args, skip_locked=False, ignore_cache=False):
            args = [a.name if isinstance(a, Experiment) else a for a in args]
            keyparts = [getattr(f, '__name__', f) for f in args]
            cachefile = os.path.join(cachedir, "::".join(keyparts))
            start = time.monotonic()
            while not os.path.exists(cachefile) or ignore_cache:
                locked = False
                try:
                    locked = claim_lock(cachefile)
                    if not locked and skip_locked:
                        raise Locked()
                    # Recompute if we hold the lock, or if we have waited
                    # more than 30 seconds for whoever does (the holder may
                    # have died without releasing it).
                    if locked or time.monotonic() >= start + 30:
                        sys.stderr.write(
                            f"{fn.__name__}({', '.join(map(repr, keyparts))})\n"
                        )
                        result = fn(*args)
                        atomic_create_file(cachefile, codec.encode(result))
                        return result
                finally:
                    if locked:
                        release_lock(cachefile)
                time.sleep(random.random())
            with open(cachefile, "rb") as i:
                return codec.decode(i.read())

        cached_fn.__name__ = fn.__name__
        cached_fn.__qualname__ = fn.__qualname__
        return cached_fn
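
accept is the inner half of a decorator factory: codec comes from an enclosing scope, presumably something like def cached(codec): ... return accept. The locking helpers are also not shown; below is a minimal POSIX sketch of claim_lock, release_lock, and atomic_create_file using O_EXCL-based lock files (only the names come from the source; the implementations are assumptions):

import os


def claim_lock(path):
    # Take an advisory lock by exclusively creating path + ".lock".
    # Returns True on success, False if another process holds it.
    try:
        os.close(os.open(path + ".lock", os.O_CREAT | os.O_EXCL | os.O_WRONLY))
        return True
    except FileExistsError:
        return False


def release_lock(path):
    os.unlink(path + ".lock")


def atomic_create_file(path, data):
    # Write to a temporary name, then rename into place, so readers never
    # observe a partially written cache file.
    tmp = path + ".tmp"
    with open(tmp, "wb") as f:
        f.write(data)
    os.rename(tmp, path)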
Example #3
def show_stats():
    data = os.path.join(ROOT, "data")
    mkdirp(data)
    # One JSON object per line (JSON Lines), echoed to stdout as it is
    # written.
    with open(os.path.join(data, "reduction-stats.jsons"), "w") as o:
        for e in EXPERIMENTS:
            for s in calculate_stats(e):
                t = json.dumps(s)
                print(t)
                print(t, file=o)
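
The output format is JSON Lines: one object per line, so the file can be consumed a record at a time. Reading it back is a one-liner (load_stats is a hypothetical helper, not from the source):

import json


def load_stats(path):
    with open(path) as f:
        return [json.loads(line) for line in f]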
Example #4
def atomic_compile(source, target):
    mkdirp(os.path.dirname(target))
    delete_older_target(source, target)
    if os.path.exists(target):
        return
    tmp_target = target + f"-{random.getrandbits(64)}.tmp"
    try:
        subprocess.check_call(
            ["gcc", "-Wall", "-Werror", "-O2", source, "-o", tmp_target])
        os.rename(tmp_target, target)
    except FileExistsError:
        # Another process won the race on a platform where rename refuses
        # to overwrite an existing target; their binary is as good as ours.
        pass
    finally:
        try:
            os.unlink(tmp_target)
        except FileNotFoundError:
            pass
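
delete_older_target is not shown in this excerpt. Presumably it removes a stale binary so the compile actually runs when the source has changed; a sketch under that assumption, comparing modification times:

import os


def delete_older_target(source, target):
    # Remove target if it predates source, forcing atomic_compile to rebuild.
    try:
        if os.stat(target).st_mtime < os.stat(source).st_mtime:
            os.unlink(target)
    except FileNotFoundError:
        pass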
Example #5
def import_corpus(experiment, source, buffer_size, count):
    random.seed(0)
    targets = os.listdir(source)
    targets.sort()
    random.shuffle(targets)

    try:
        os.unlink(os.path.join(CORPORA, experiment + '.tar'))
    except FileNotFoundError:
        pass

    shutil.rmtree(os.path.join(CORPORA, experiment), ignore_errors=True)
    mkdirp(os.path.join(CORPORA, experiment))

    experiment = EXPERIMENTS[experiment]

    completed = 0
    for f in targets:
        f = os.path.join(source, f)
        if os.stat(f).st_size > buffer_size:
            continue
        print(f)
        try:
            with open(f, 'rb') as i:
                buf = i.read()
            gen = ConjectureData.for_buffer(buf).draw(experiment.generator)
            info = experiment.calculate_info(gen)
            # As in generate_corpus, error_pred only validates the test
            # case; the value itself is unused.
            error_pred = experiment.calculate_error_predicate(info)
        except Exception:
            traceback.print_exc()
            continue
        dest = os.path.join(CORPORA, experiment.name, os.path.basename(f))
        with open(dest, "wb") as o:
            o.write(buf)
        completed += 1
        if completed >= count:
            break

    subprocess.check_call(["apack", experiment.name + ".tar", experiment.name], cwd=CORPORA)
    shutil.rmtree(os.path.join(CORPORA, experiment.name))
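
The key difference from generate_corpus is that the bytes come from disk and are replayed through the strategy via ConjectureData.for_buffer. A minimal standalone illustration of that replay mechanism (this is Hypothesis's internal conjecture API, so import paths and signatures vary between versions):

from hypothesis import strategies as st
from hypothesis.internal.conjecture.data import ConjectureData

# A fixed buffer always decodes to the same value; that determinism is what
# makes a saved corpus replayable.
data = ConjectureData.for_buffer(bytes(100))
value = data.draw(st.integers(0, 255))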
Example #6
def build_all():
    if os.getpid() != original_pid:
        logdir = os.path.join(WORKING, "logs")
        mkdirp(logdir)
        counter = 0
        extra = ""
        while True:
            try:
                logfile = open(
                    os.path.join(
                        logdir,
                        datetime.now().strftime("%Y-%m-%d-%H:%M")
                        + f"-{os.getpid()}{extra}.log",
                    ),
                    "x",
                )
                break
            except FileExistsError:
                counter += 1
                assert counter < 10
                extra = f"-{counter}"

        # reopen with line buffering
        sys.stdout = os.fdopen(sys.stdout.fileno(), "w", 1)
        sys.stderr = os.fdopen(sys.stderr.fileno(), "w", 1)

        # redirect stdout and stderr to the log file opened above
        os.dup2(logfile.fileno(), sys.stdout.fileno())
        os.dup2(logfile.fileno(), sys.stderr.fileno())

    targets = [
        (f, ex, n)
        for ex in EXPERIMENTS
        for n in corpus_for_experiment(ex)
        for f in [
            reduce_with_hypothesis,
            reduce_badly_with_hypothesis,
            reduce_with_picire,
            reduce_with_creduce,
            final_normalized_hypothesis_then_lines,
        ]
    ]

    random.shuffle(targets)

    working = tempfile.mkdtemp("working-build-all")
    # black (and possibly other executables?) get a bit upset when trying to
    # run too many copies in parallel and end up corrupting the cache dir. As
    # a result we run each process in its own working directory with its own
    # cache directory.
    cachedir = os.path.join(working, ".cache")
    os.makedirs(cachedir)
    prev_cache = os.environ.get("XDG_CACHE_HOME")
    prev_dir = os.getcwd()
    try:
        os.chdir(working)
        os.environ["XDG_CACHE_HOME"] = cachedir
        backoff = 0.0
        any_skipped = True
        while any_skipped:
            time.sleep(backoff)
            any_skipped = False
            for fn, *args in targets:
                try:
                    fn(*args, skip_locked=True)
                except Locked:
                    any_skipped = True
                    continue

            backoff = (backoff + random.random()) * 2
    finally:
        # Leave the temporary directory before deleting it; some platforms
        # refuse to remove the current working directory.
        os.chdir(prev_dir)
        shutil.rmtree(working)
        if prev_cache is not None:
            os.environ["XDG_CACHE_HOME"] = prev_cache
        else:
            del os.environ["XDG_CACHE_HOME"]
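
The os.getpid() != original_pid check only makes sense if original_pid is captured at import time and build_all also runs in forked worker processes. A sketch of that surrounding setup (spawn_workers and the fork loop are assumptions; only original_pid appears in the source):

import os

original_pid = os.getpid()  # captured once, at import time


def spawn_workers(n):
    # Forked children see os.getpid() != original_pid, so build_all sends
    # their stdout/stderr to per-process log files.
    for _ in range(n):
        if os.fork() == 0:
            build_all()
            os._exit(0)
    for _ in range(n):
        os.wait()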