Example no. 1
    def sample(block: Block,
               sample_count: int,
               min_search: bool = False) -> SamplingResult:

        backend_request = block.build_backend_request()
        if block.errors:
            for e in block.errors:
                print(e)
                if "WARNING" not in e:
                    return SamplingResult([], {})

        solutions = sample_uniform(
            sample_count, CNF(backend_request.get_cnfs_as_json()),
            backend_request.fresh - 1, block.variables_per_sample(),
            backend_request.get_requests_as_generation_requests(), False)

        if not solutions:
            from sweetpea.constraints import AtLeastKInARow
            if min_search:
                return SamplingResult([], {})
            else:
                max_constraints = list(
                    map(
                        lambda x: cast(AtLeastKInARow, x).max_trials_required,
                        filter(lambda c: isinstance(c, AtLeastKInARow),
                               block.constraints)))

                if max_constraints:
                    print(
                        "No solution found... We require a minimum trials constraint to find a solution."
                    )
                    max_constraint = max(max_constraints)
                    min_constraint = block.trials_per_sample() + 1
                    original_min_trials = block.min_trials
                    last_valid_min_constraint = max_constraint
                    last_valid = SamplingResult([], {})
                    # Guard against log(0) when the search range is empty.
                    progress = tqdm(total=math.ceil(
                        math.log(max(1, max_constraint - min_constraint))) + 1,
                                    file=sys.stdout)
                    while True:
                        current_constraint = int(
                            (max_constraint - min_constraint + 1) /
                            2) + min_constraint
                        block.min_trials = original_min_trials
                        c = minimum_trials(current_constraint)
                        c.validate(block)
                        c.apply(block, None)
                        block.constraints.append(c)
                        res = UnigenSamplingStrategy.sample(
                            block, sample_count, True)
                        progress.update(1)
                        if res.samples:
                            if current_constraint <= min_constraint:
                                print(
                                    "Optimal minimum trials constraint is at ",
                                    current_constraint, ".")
                                return res
                            else:
                                last_valid_min_constraint = current_constraint
                                last_valid = res
                                max_constraint = current_constraint - 1
                        else:
                            if max_constraint <= current_constraint:
                                print(
                                    "Optimal minimum trials constraint is at ",
                                    last_valid_min_constraint, ".")
                                return last_valid
                            else:
                                min_constraint = current_constraint + 1
                else:
                    return SamplingResult([], {})

        result = list(
            map(lambda s: SamplingStrategy.decode(block, s.assignment),
                solutions))
        return SamplingResult(result, {})
Example no. 2
    def generate_derivations(block: Block) -> List[Derivation]:
        """Usage::

            >>> import operator as op
            >>> color = Factor("color", ["red", "blue"])
            >>> text  = Factor("text",  ["red", "blue"])
            >>> conLevel  = DerivedLevel("con", WithinTrial(op.eq, [color, text]))
            >>> incLevel  = DerivedLevel("inc", WithinTrial(op.ne, [color, text]))
            >>> conFactor = Factor("congruent?", [conLevel, incLevel])
            >>> design = [color, text, conFactor]
            >>> crossing = [color, text]
            >>> block = fully_cross_block(design, crossing, [])
            >>> DerivationProcessor.generate_derivations(block)
            [Derivation(derivedIdx=4, dependentIdxs=[[0, 2], [1, 3]]), Derivation(derivedIdx=5, dependentIdxs=[[0, 3], [1, 2]])]

        In the example above, the indices of the design are:

        ===  =============
        idx  level
        ===  =============
        0    color:red
        1    color:blue
        2    text:red
        3    text:blue
        4    conFactor:con
        5    conFactor:inc
        ===  =============

        So the tuple ``(4, [[0,2], [1,3]])`` represents the information that
        the derivedLevel con is true iff ``(color:red && text:red) ||
        (color:blue && text:blue)`` by pairing the relevant indices together.

        :rtype:
            returns a list of :class:`Derivation` objects. Each derivation
            pairs the index of the derived level with the groups of dependent
            level indices, i.e.
            ``(index of the derived level, list of dependent levels)``.
        """
        derived_factors: List[DerivedFactor] = [factor for factor in block.design if isinstance(factor, DerivedFactor)]
        accum = []

        for factor in derived_factors:
            according_level: Dict[Tuple[Any, ...], DerivedLevel] = {}
            for level in factor.levels:
                cross_product: List[Tuple[Level, ...]] = level.get_dependent_cross_product()
                valid_tuples: List[Tuple[Level, ...]] = []
                for level_tuple in cross_product:
                    names = [dependent_level.name for dependent_level in level_tuple]
                    if level.window.width != 1:
                        # NOTE: mypy doesn't like this, but I'm not rewriting
                        #       it right now. Need to replace `chunk_list` with
                        #       a better version.
                        names = list(chunk_list(names, level.window.width))  # type: ignore
                    result = level.window.predicate(*names)
                    if not isinstance(result, bool):
                        raise ValueError(f"Expected derivation predicate to return bool; got {type(result)}.")
                    if result:
                        valid_tuples.append(level_tuple)
                        if level_tuple in according_level:
                            raise ValueError(f"Factor {factor.name} matches {according_level[level_tuple].name} and "
                                             f"{level.name} with assignment {names}.")
                        according_level[level_tuple] = level

                if not valid_tuples:
                    print(f"WARNING: There is no assignment that matches factor {factor.name} with level {level.name}.")

                valid_indices = [[block.first_variable_for_level(dependent_level.factor, dependent_level)
                                  for dependent_level in valid_tuple]
                                 for valid_tuple in valid_tuples]
                shifted_indices = DerivationProcessor.shift_window(valid_indices,
                                                                   level.window,
                                                                   block.variables_per_trial())
                level_index = block.first_variable_for_level(factor, level)
                accum.append(Derivation(level_index, shifted_indices, factor))
        return accum
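
For the congruent level in the docstring example, the two valid tuples are ``(color:red, text:red)`` and ``(color:blue, text:blue)``; they map to the index pairs ``[0, 2]`` and ``[1, 3]``, which is exactly the ``dependentIdxs`` of the first ``Derivation`` shown above.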
Example no. 3
def __generate_encoding_diagram(blk: Block) -> str:
    diagram_str = ""

    design_size = blk.variables_per_trial()
    num_trials = blk.trials_per_sample()
    num_vars = blk.variables_per_sample()

    largest_number_len = len(str(num_vars))

    header_widths = []
    row_format_str = '| {:>7} |'
    for f in blk.design:
        # Each level column must be wide enough for both its name and the
        # largest variable number printed beneath it.
        level_names = list(map(get_external_level_name, f.levels))
        level_name_widths = [
            max(largest_number_len, len(name)) for name in level_names
        ]

        level_names_width = sum(level_name_widths) + len(
            level_names) - 1  # Extra length for spaces in between names.
        factor_header_width = max(len(f.factor_name), level_names_width)
        header_widths.append(factor_header_width)

        # If the header is wider than the level columns combined, distribute the extra width across them.
        diff = factor_header_width - level_names_width
        if diff > 0:
            idx = 0
            while diff > 0:
                level_name_widths[idx] += 1
                idx += 1
                diff -= 1
                if idx >= len(level_name_widths):
                    idx = 0

        # While we're here, build up the row format str.
        row_format_str = reduce(lambda a, b: a + ' {{:^{}}}'.format(b),
                                level_name_widths, row_format_str)
        row_format_str += ' |'

    header_format_str = reduce(lambda a, b: a + ' {{:^{}}} |'.format(b),
                               header_widths, '| {:>7} |')
    factor_names = list(map(lambda f: f.factor_name, blk.design))
    header_str = header_format_str.format(*["Trial"] + factor_names)
    row_width = len(header_str)

    # First line
    diagram_str += ('-' * row_width) + '\n'

    # Header
    diagram_str += header_str + '\n'

    # Level names
    all_level_names = [
        ln for (fn, ln) in get_all_external_level_names(blk.design)
    ]
    diagram_str += row_format_str.format(*['#'] + all_level_names) + '\n'

    # Separator
    diagram_str += ('-' * row_width) + '\n'

    # Variables
    for t in range(num_trials):
        args = [str(t + 1)]
        for f in blk.design:
            if f.applies_to_trial(t + 1):
                variables = [
                    blk.first_variable_for_level(f, l) + 1 for l in f.levels
                ]
                if f.has_complex_window():

                    # Total width of (possibly nested) derivation windows: a
                    # window over another complex factor extends by that
                    # factor's window width, minus the one trial they share.
                    def acc_width(w) -> int:
                        return w.width + (
                            acc_width(w.args[0].levels[0].window) -
                            1 if w.args[0].has_complex_window() else 0)

                    width = acc_width(f.levels[0].window)
                    stride = f.levels[0].window.stride
                    # Account for trials skipped by the stride, then shift
                    # back by the window width to locate this trial's first
                    # variable.
                    stride_offset = (stride - 1) * int(t / stride)
                    offset = t - width + 1 - stride_offset
                    variables = list(
                        map(lambda n: n + len(variables) * offset, variables))
                else:
                    variables = list(
                        map(lambda n: n + design_size * t, variables))

                args += list(map(str, variables))
            else:
                args += list(repeat('', len(f.levels)))

        diagram_str += row_format_str.format(*args) + '\n'

    # Footer
    diagram_str += ('-' * row_width) + '\n'
    return diagram_str
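
For the color/text design from the docstring in Example no. 2, the generated diagram has roughly this shape (an illustrative sketch of the layout, not captured output):

    ----------------------------------------------------
    |   Trial |  color    |   text    |  congruent?    |
    |       # | red  blue | red  blue |  con     inc   |
    ----------------------------------------------------
    |       1 |  1     2  |  3     4  |   5       6    |
    |       2 |  7     8  |  9    10  |  11      12    |
    |     ... |           |           |                |
    ----------------------------------------------------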
Example no. 4
    def __generate_sample(block: Block, cnf: CNF,
                          sample_metrics: dict) -> dict:
        sample_metrics['trials'] = []

        # Start a 'committed' list of CNFs
        committed = cast(List[And], [])

        for trial_number in range(block.trials_per_sample()):
            trial_start_time = time()

            trial_metrics = {'t': trial_number + 1, 'solver_calls': []}
            solver_calls = cast(List[dict], trial_metrics['solver_calls'])

            #  Get the variable list for this trial.
            variables = block.variable_list_for_trial(trial_number + 1)
            variables = list(filter(lambda i: i != [], variables))
            potential_trials = list(map(list, product(*variables)))

            trial_metrics['potential_trials'] = len(potential_trials)

            # Use an environment variable to switch prefiltering on and off.
            if GuidedSamplingStrategy.__prefilter_enabled():
                # Flatten the list
                flat_vars = list(chain(*variables))

                # Check SAT for each one
                unsat = []
                for v in flat_vars:
                    t_start = time()
                    full_cnf = cnf + CNF(cnf_to_json(committed)) + CNF(
                        cnf_to_json([And([v])]))
                    allowed = cnf_is_satisfiable(full_cnf)
                    duration_seconds = time() - t_start
                    solver_calls.append({
                        'time': duration_seconds,
                        'SAT': allowed
                    })
                    if not allowed:
                        unsat.append(v)

                # TODO: Count filtering SAT calls separately?

                # Filter out any potential trials with those vars set
                filtered_pts = []
                for pt in potential_trials:
                    if any(uv in pt for uv in unsat):
                        continue
                    else:
                        filtered_pts.append(pt)

                # Record the number filtered out for metrics
                trial_metrics['prefiltered_out'] = len(potential_trials) - len(
                    filtered_pts)
                potential_trials = filtered_pts

            allowed_trials = []
            for potential_trial in potential_trials:
                start_time = time()
                full_cnf = cnf + CNF(cnf_to_json(committed)) + CNF(
                    cnf_to_json([And(potential_trial)]))
                allowed = cnf_is_satisfiable(full_cnf)
                duration_seconds = time() - start_time

                solver_calls.append({'time': duration_seconds, 'SAT': allowed})

                if allowed:
                    allowed_trials.append(potential_trial)

            trial_metrics['allowed_trials'] = len(allowed_trials)
            trial_metrics['solver_call_count'] = len(solver_calls)
            sample_metrics['trials'].append(trial_metrics)

            # Randomly sample a single trial from the uniform distribution of
            # the allowed trials, and commit that trial to the committed
            # sequence. (Assumes at least one candidate trial remained
            # satisfiable; otherwise randint raises ValueError.)
            trial_idx = np.random.randint(0, len(allowed_trials))
            committed.append(And(allowed_trials[trial_idx]))

            trial_metrics['time'] = time() - trial_start_time

        # Aggregate the total solver calls
        sample_metrics['solver_call_count'] = 0
        for tm in sample_metrics['trials']:
            sample_metrics['solver_call_count'] += tm['solver_call_count']

        # Flatten the committed trials into a list of integers and decode it.
        solution = GuidedSamplingStrategy.__committed_to_solution(committed)
        return SamplingStrategy.decode(block, solution)
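
Note that drawing uniformly among the trials allowed at each step does not, in general, yield a uniform distribution over complete sequences: an early commitment can admit many more completions than another. The per-trial solver-call metrics recorded above make the cost of this incremental approach visible.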
Example no. 5
 def apply(self, block: Block, backend_request: BackendRequest) -> None:
     var_list = block.build_variable_list((self.factor, self.level))
     backend_request.cnfs.append(And(list(map(lambda n: n * -1, var_list))))
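
Appending a conjunction of the negated variables forces every variable for the excluded (factor, level) pair to false, so the level cannot appear in any trial.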
Example no. 6
 def apply(self, block: Block, backend_request: BackendRequest) -> None:
     if block.min_trials:
         block.min_trials = max(block.min_trials, self.trials)
     else:
         block.min_trials = self.trials
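
Taking the maximum means stacked minimum-trials constraints only strengthen one another; applying a smaller minimum after a larger one leaves the larger bound in place.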
Example no. 7
 def _build_variable_sublists(self, block: Block, level: Tuple[Factor, Union[SimpleLevel, DerivedLevel]], sublist_length: int) -> List[List[int]]:
     var_list = block.build_variable_list(level)
     raw_sublists = [var_list[i:i+sublist_length] for i in range(0, len(var_list))]
     return list(filter(lambda l: len(l) == sublist_length, raw_sublists))
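
For instance, with ``var_list = [1, 2, 3, 4]`` and ``sublist_length = 2``, the comprehension yields ``[1, 2], [2, 3], [3, 4], [4]`` and the final filter drops the short tail, leaving ``[[1, 2], [2, 3], [3, 4]]``: every window of consecutive level variables.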
Example no. 8
 def is_complex(self, block: Block) -> bool:
     return self.derived_idx < block.grid_variables()
Example no. 9
 def apply_to_backend_request(self, block: Block, level: Tuple[Factor, Union[SimpleLevel, DerivedLevel]], backend_request: BackendRequest) -> None:
     var_list = block.build_variable_list(level)
     backend_request.ll_requests.append(LowLevelRequest("EQ", self.k, var_list))
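
The ``"EQ"`` low-level request constrains how many of the listed variables may be true, here requiring the count to equal ``self.k``.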
Example no. 10
    def sample(block: Block,
               sample_count: int,
               min_search: bool = False) -> SamplingResult:

        backend_request = block.build_backend_request()
        if block.errors:
            for e in block.errors:
                print(e)
                if "WARNING" not in e:
                    return SamplingResult([], {})

        solutions = sample_uniform(
            sample_count, CNF(backend_request.get_cnfs_as_json()),
            backend_request.fresh - 1, block.variables_per_sample(),
            backend_request.get_requests_as_generation_requests(), False)

        # This section deals with a corner case created by the
        # at_least_k_in_a_row constraint: in some situations that constraint
        # only admits solutions with the support of a minimum_trials
        # constraint. Binary search, with trial and error, finds the optimal
        # (smallest workable) minimum-trials constraint for the user.
        if not solutions:
            from sweetpea.constraints import AtLeastKInARow
            if min_search:
                return SamplingResult([], {})
            else:
                atleast_constraints = cast(
                    List[AtLeastKInARow],
                    list(filter(lambda c: isinstance(c, AtLeastKInARow),
                                block.constraints)))
                max_constraints = list(
                    map(lambda x: x.max_trials_required, atleast_constraints))

                if max_constraints:
                    print(
                        "No solution found... We require a minimum trials constraint to find a solution."
                    )
                    max_constraint = max(max_constraints)
                    min_constraint = block.trials_per_sample() + 1
                    original_min_trials = block.min_trials
                    last_valid_min_constraint = max_constraint
                    last_valid = SamplingResult([], {})
                    # Guard against log(0) when the search range is empty.
                    progress = tqdm(
                        total=ceil(log(max(1, max_constraint - min_constraint))) + 1,
                        file=sys.stdout)
                    while True:
                        current_constraint = int(
                            (max_constraint - min_constraint + 1) /
                            2) + min_constraint
                        block.min_trials = original_min_trials
                        c = minimum_trials(current_constraint)
                        c.validate(block)
                        c.apply(block, None)
                        block.constraints.append(c)
                        res = UnigenSamplingStrategy.sample(
                            block, sample_count, True)
                        progress.update(1)
                        if res.samples:
                            if current_constraint <= min_constraint:
                                print(
                                    "Optimal minimum trials constraint is at ",
                                    current_constraint, ".")
                                return res
                            else:
                                last_valid_min_constraint = current_constraint
                                last_valid = res
                                max_constraint = current_constraint - 1
                        else:
                            if max_constraint <= current_constraint:
                                print(
                                    "Optimal minimum trials constraint is at ",
                                    last_valid_min_constraint, ".")
                                return last_valid
                            else:
                                min_constraint = current_constraint + 1
                else:
                    return SamplingResult([], {})

        result = list(
            map(lambda s: SamplingStrategy.decode(block, s.assignment),
                solutions))
        return SamplingResult(result, {})
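
The loop above is ordinary binary search over candidate minimum-trials values, relying on feasibility being monotone in the number of trials (which the search assumes). A minimal standalone sketch of the same pattern, with a hypothetical ``has_solution`` oracle standing in for the recursive sampling call:

    def find_min_feasible(lo: int, hi: int, has_solution) -> int:
        # Smallest k in [lo, hi] with has_solution(k) True, assuming
        # has_solution is monotone: False ... False True ... True.
        best = hi
        while lo <= hi:
            mid = (lo + hi) // 2
            if has_solution(mid):
                best = mid          # feasible; try fewer trials
                hi = mid - 1
            else:
                lo = mid + 1        # infeasible; need more trials
        return best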
Example no. 11
def __run_kinarow(c: Constraint, block: Block = block) -> BackendRequest:
    # Note: the default for `block` is bound from the enclosing scope at
    # definition time, so this helper assumes a `block` already exists there.
    backend_request = BackendRequest(block.variables_per_sample() + 1)
    for dc in c.desugar():
        dc.apply(block, backend_request)
    return backend_request
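
``desugar()`` expands the high-level k-in-a-row constraint into simpler constraints, each of which contributes its clauses to the shared ``BackendRequest``.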