def test_uniform_combinatoric_is_always_valid(filename):
    """Execute an experiment file and verify that every sample drawn by the
    uniform-combinatoric (UC) enumerator satisfies the block's CNF.

    The executed file must define a variable named ``block``. Up to 200
    samples are generated and each one is checked for satisfiability
    against the block's CNF; any UNSAT sample is reported via pytest.fail.
    """
    failures = []

    # Execute the experiment file into an explicit namespace dict instead of
    # exec'ing into locals(): mutating the locals() dict of a function frame
    # is a CPython implementation detail and no longer works under PEP 667
    # (Python 3.13+), which would make the 'block' lookup below fail.
    namespace = {}
    with open(filename, 'r') as f:
        contents = f.read()
    exec(contents, globals(), namespace)

    if 'block' not in namespace:
        pytest.fail(
            "File did not produce a variable named 'block', aborting. file={}".
            format(filename))

    block = namespace['block']

    # Build the CNF for this block
    build_cnf_result = build_cnf(block)

    # Build the sampler
    enumerator = UCSolutionEnumerator(block)

    # Generate some samples, make sure they're all SAT.
    sample_count = min(enumerator.solution_count(), 200)
    print("Checking that UC samples are SAT for {}, sample count={}".format(
        filename, sample_count))
    for s in range(sample_count):
        sample = enumerator.generate_solution_variables()
        if not cnf_is_satisfiable(build_cnf_result +
                                  CNF(cnf_to_json([And(sample)]))):
            failures.append("Found UNSAT solution! Solution={} File={}".format(
                sample, filename))

    if failures:
        pytest.fail(
            '{} failures occurred in SAT checks for UC sampler: {}'.format(
                len(failures), failures))
# Beispiel #2 (Example #2)
# 0
    def to_json(self, support: int, solution_count: int):
        """Serialize this request, including derived unigen CLI arguments,
        as a JSON string.

        Parameter derivation taken from the unigen2.py script:
        https://bitbucket.org/kuldeepmeel/unigen/src/4677b2ec4553b2a44a31910db0037820abdc1394/UniGen2.py?at=master&fileviewer=file-view-default
        """
        kappa = 0.638
        pivot_unigen = math.ceil(4.03 * (1 + 1 / kappa) * (1 + 1 / kappa))
        log_count = math.log(solution_count, 2)
        start_iteration = int(
            round(log_count + math.log(1.8, 2) -
                  math.log(pivot_unigen, 2))) - 2

        unigen_arguments = [
            "--verbosity=0", "--samples=100", "--kappa=" + str(kappa),
            "--pivotUniGen=" + str(pivot_unigen),
            "--startIteration=" + str(start_iteration),
            "--maxLoopTime=3000", "--maxTotalTime=72000",
            "--tApproxMC=1", "--pivotAC=60", "--gaussuntil=400"
        ]

        payload = {
            "fresh": self.fresh,
            "cnfs": cnf_to_json(self.cnfs),
            "requests": [r.to_dict() for r in self.ll_requests],
            "unigen": {
                "support": support,
                "arguments": unigen_arguments
            }
        }
        return json.dumps(payload)
# Beispiel #3 (Example #3)
# 0
def is_cnf_still_sat(block: Block, additional_clauses: List[And]) -> bool:
    """Return True iff the block's CNF stays satisfiable after conjoining
    the given additional clauses (with generation requests applied)."""
    backend_request = block.build_backend_request()
    base_cnf = CNF(backend_request.get_cnfs_as_json())
    extra_cnf = CNF(cnf_to_json(additional_clauses))
    combined_cnf = combine_cnf_with_requests(
        base_cnf + extra_cnf,
        backend_request.fresh - 1,
        block.variables_per_sample(),
        backend_request.get_requests_as_generation_requests())
    return cnf_is_satisfiable(combined_cnf)
# Beispiel #4 (Example #4)
# 0
def test_cnf_to_json():
    """cnf_to_json flattens And/Or/Not formula wrappers into nested lists
    of signed integers, one inner list per clause."""
    cases = [
        ([And([1])], [[1]]),
        ([And([Or([1])])], [[1]]),
        ([And([Or([Not(5)])])], [[-5]]),
        ([And([Or([1, 2, Not(4)])])], [[1, 2, -4]]),
        ([And([Or([1, 4]), Or([5, -4, 2]), Or([-1, -5])])],
         [[1, 4],
          [5, -4, 2],
          [-1, -5]]),
        ([And([Or([1, 3, Not(2)]),
               Or([1, 3, 5]),
               Or([1, 4, Not(2)]),
               Or([1, 4, 5]),
               Or([2, 3, 1, 5]),
               Or([2, 4, 1, 5])])],
         [[1, 3, -2],
          [1, 3, 5],
          [1, 4, -2],
          [1, 4, 5],
          [2, 3, 1, 5],
          [2, 4, 1, 5]]),
    ]
    for formula, expected in cases:
        assert cnf_to_json(formula) == expected
# Beispiel #5 (Example #5)
# 0
 def get_cnfs_as_json(self):
     """Serialize this request's CNF clauses via cnf_to_json."""
     serialized_cnfs = cnf_to_json(self.cnfs)
     return serialized_cnfs
# Beispiel #6 (Example #6)
# 0
    def __generate_sample(block: Block, cnf: CNF,
                          sample_metrics: dict) -> dict:
        """Generate one sample (a full trial sequence) for the block.

        For each trial: enumerate the candidate variable assignments, keep
        only those that are SAT together with the clauses committed so far,
        pick one uniformly at random, and commit it. Timing and solver-call
        statistics are recorded into sample_metrics (mutated in place).

        Raises:
            RuntimeError: if some trial has no satisfiable candidate.
                (Previously this surfaced as an opaque ValueError from
                np.random.randint(0, 0).)
        """
        sample_metrics['trials'] = []

        # Start a 'committed' list of CNFs
        committed = cast(List[And], [])

        for trial_number in range(block.trials_per_sample()):
            trial_start_time = time()

            trial_metrics = {'t': trial_number + 1, 'solver_calls': []}
            solver_calls = cast(List[dict], trial_metrics['solver_calls'])

            # Get the variable list for this trial; drop empty slots before
            # taking the cartesian product of the remaining choices.
            variables = block.variable_list_for_trial(trial_number + 1)
            variables = [v for v in variables if v != []]
            potential_trials = list(map(list, product(*variables)))

            trial_metrics['potential_trials'] = len(potential_trials)

            # Use env var to switch between filtering and not
            if GuidedSamplingStrategy.__prefilter_enabled():
                # Flatten the list
                flat_vars = list(chain(*variables))

                # Check SAT for each variable on its own: a variable that is
                # UNSAT by itself can never appear in an allowed trial.
                unsat = []
                for v in flat_vars:
                    t_start = time()
                    full_cnf = cnf + CNF(cnf_to_json(committed)) + CNF(
                        cnf_to_json([And([v])]))
                    allowed = cnf_is_satisfiable(full_cnf)
                    solver_calls.append({
                        'time': time() - t_start,
                        'SAT': allowed
                    })
                    if not allowed:
                        unsat.append(v)

                # TODO: Count filtering SAT calls separately?

                # Filter out any potential trials containing an UNSAT var.
                filtered_pts = [
                    pt for pt in potential_trials
                    if not any(uv in pt for uv in unsat)
                ]

                # Record the number filtered out for metrics
                trial_metrics['prefiltered_out'] = len(potential_trials) - len(
                    filtered_pts)
                potential_trials = filtered_pts

            allowed_trials = []
            for potential_trial in potential_trials:
                start_time = time()
                full_cnf = cnf + CNF(cnf_to_json(committed)) + CNF(
                    cnf_to_json([And(potential_trial)]))
                allowed = cnf_is_satisfiable(full_cnf)

                solver_calls.append({'time': time() - start_time,
                                     'SAT': allowed})

                if allowed:
                    allowed_trials.append(potential_trial)

            trial_metrics['allowed_trials'] = len(allowed_trials)
            trial_metrics['solver_call_count'] = len(solver_calls)
            sample_metrics['trials'].append(trial_metrics)

            # Fail fast with a clear message rather than letting
            # np.random.randint(0, 0) raise an opaque ValueError.
            if not allowed_trials:
                raise RuntimeError(
                    "No satisfiable trial found for trial number {}".format(
                        trial_number + 1))

            # Randomly sample a single trial from the uniform distribution of
            # the allowed trials, and commit that trial to the committed
            # sequence.
            trial_idx = np.random.randint(0, len(allowed_trials))
            committed.append(And(allowed_trials[trial_idx]))

            trial_metrics['time'] = time() - trial_start_time

        # Aggregate the total solver calls
        sample_metrics['solver_call_count'] = sum(
            tm['solver_call_count'] for tm in sample_metrics['trials'])

        # Flatten the committed trials into a list of integers and decode it.
        solution = GuidedSamplingStrategy.__committed_to_solution(committed)
        return SamplingStrategy.decode(block, solution)