Beispiel #1
0
def collect_design_metrics(block: Block) -> Dict:
    """Collect size/complexity metrics for *block* and return them as a dict.

    The CNF for the block is generated once and its DIMACS header line
    (``p cnf <num_variables> <num_clauses>``) is parsed for the variable
    and clause totals.
    """
    backend_request = block.build_backend_request()
    # First line of the DIMACS output is "p cnf <num_vars> <num_clauses>".
    dimacs_header = __generate_cnf(block).split('\n')[0].split(' ')
    # Hoisted: trials_per_sample() was previously called twice.
    block_length = block.trials_per_sample()

    return {
        'full_factor_count': len(block.design),
        'crossing_factor_count': len(block.crossing),
        'constraint_count': len(block.constraints),
        'block_length': block_length,
        'block_length_factorial': factorial(block_length),
        'low_level_request_count': len(backend_request.ll_requests),
        'cnf_total_variables': int(dimacs_header[2]),
        'cnf_total_clauses': int(dimacs_header[3])
    }
Beispiel #2
0
def collect_design_metrics(block: Block) -> Dict:
    """Gather descriptive metrics about a block (factor counts, trial
    counts, and CNF size) and return them as a dictionary.
    """
    request = block.build_backend_request()
    # The first line of the generated CNF is the DIMACS problem header.
    header_fields = __generate_cnf(block).split('\n')[0].split(' ')

    metrics = {
        'full_factor_count': len(block.design),
        'crossing_factor_count': len(block.crossing),
        'constraint_count': len(block.constraints),
        'block_length': block.trials_per_sample(),
        'block_length_factorial': factorial(block.trials_per_sample()),
        'low_level_request_count': len(request.ll_requests),
        'cnf_total_variables': int(header_fields[2]),
        'cnf_total_clauses': int(header_fields[3]),
    }
    return metrics
Beispiel #3
0
    def __apply_derivation(self, block: Block, backend_request: BackendRequest) -> None:
        """For every trial, constrain the derived level's variable to be true
        iff at least one of its dependent index combinations is fully true.
        The resulting Iffs are CNF-converted and appended to the request.
        """
        trial_size = block.variables_per_trial()
        num_trials = block.trials_per_sample()

        iffs = []
        for trial in range(num_trials):
            # Shift the per-trial 0-based indices into this trial's 1-based
            # variable numbering.
            offset = trial * trial_size + 1
            disjuncts = [And([idx + offset for idx in idx_group])
                         for idx_group in self.dependent_idxs]
            iffs.append(Iff(self.derived_idx + offset, Or(disjuncts)))

        (cnf, new_fresh) = block.cnf_fn(And(iffs), backend_request.fresh)

        backend_request.cnfs.append(cnf)
        backend_request.fresh = new_fresh
Beispiel #4
0
    def decode(block: Block, solution: List[int]) -> dict:
        """Decode a SAT solution (signed variable numbers) into an experiment
        dict mapping each factor name to its per-trial list of level names.

        Note: *solution* is sorted in place as a side effect.
        """
        # Sort the list and remove any negative (false) variables
        solution.sort()
        solution = [v for v in solution if v > 0]

        # Separate into simple/complex variables.
        grid_max = block.grid_variables()
        simple_variables = [v for v in solution if v <= grid_max]
        complex_variables = [v for v in solution if v > grid_max]

        experiment = cast(dict, {})

        # Simple factors: each true variable decodes to one (factor, level).
        tuples = [block.decode_variable(v) for v in simple_variables]
        string_tuples = [(t[0].factor_name, t[1].external_name) for t in tuples]
        for (factor_name, level_name) in string_tuples:
            experiment.setdefault(factor_name, []).append(level_name)

        # Complex factors - The challenge here is knowing when to insert '', rather than using the variables.
        # Start after 'width' trials, and shift 'stride' trials for each variable.
        complex_factors = [f for f in block.design if f.has_complex_window]
        for f in complex_factors:
            # Get the contiguous variable range for this factor.
            start = block.first_variable_for_level(f, f.levels[0]) + 1
            end = start + block.variables_for_factor(f)
            variables = [v for v in complex_variables if start <= v < end]

            # Get the level names for the variables in the solution.
            level_names = [block.decode_variable(v)[1].external_name
                           for v in variables]

            # Intersperse empty strings for the trials to which this factor
            # does not apply (windowed factors skip early and strided trials).
            level_names_fill = []
            for n in range(block.trials_per_sample()):
                level_names_fill.append(
                    level_names.pop(0) if f.applies_to_trial(n + 1) else '')
            experiment[f.factor_name] = level_names_fill

        return experiment
Beispiel #5
0
    def apply(block: Block, backend_request: BackendRequest) -> None:
        """Append one "exactly one level selected" (EQ 1) low-level request
        per factor per trial: simple factors trial-by-trial, then complex
        (windowed) factors over their own variable ranges.
        """
        var = 1
        for _ in range(block.trials_per_sample()):
            for factor in filter(lambda f: not f.has_complex_window, block.design):
                level_count = len(factor.levels)
                window_vars = list(range(var, var + level_count))
                backend_request.ll_requests.append(
                    LowLevelRequest("EQ", 1, window_vars))
                var += level_count

        for factor in filter(lambda f: f.has_complex_window, block.design):
            factor_var_count = block.variables_for_factor(factor)
            var_list = [var + i for i in range(factor_var_count)]
            for group in chunk_list(var_list, len(factor.levels)):
                backend_request.ll_requests.append(LowLevelRequest("EQ", 1, group))
            var += factor_var_count
Beispiel #6
0
    def __apply_derivation_with_complex_window(self, block: Block, backend_request: BackendRequest) -> None:
        """Like __apply_derivation, but for a derived factor whose window spans
        multiple trials: Iff constraints are emitted only for trials the factor
        applies to, and dependent-variable offsets account for the window's
        stride and the mixed variable layout.
        """
        trial_size = block.variables_per_trial()
        trial_count = block.trials_per_sample()
        iffs = []
        f = self.factor
        window = f.levels[0].window
        # t counts only the trials this factor actually applies to; it drives
        # both the dependent-variable offset and the derived-variable index.
        t = 0
        for n in range(trial_count):
            if not f.applies_to_trial(n + 1):
                continue

            num_levels = len(f.levels)
            # Per-variable step: grid variables advance by the full trial size,
            # while variables beyond the grid advance by their own factor's
            # level count. NOTE(review): assumes decode_variable(x+1) returns a
            # (factor, level) pair for post-grid indices — confirm with Block.
            get_trial_size = lambda x: trial_size if x < block.grid_variables() else len(block.decode_variable(x+1)[0].levels)
            or_clause = Or(list(And(list(map(lambda x: x + (t * window.stride * get_trial_size(x) + 1), l))) for l in self.dependent_idxs))
            iffs.append(Iff(self.derived_idx + (t * num_levels) + 1, or_clause))
            t += 1
        # Convert the accumulated Iffs to CNF, threading the fresh-variable
        # counter through the request.
        (cnf, new_fresh) = block.cnf_fn(And(iffs), backend_request.fresh)

        backend_request.cnfs.append(cnf)
        backend_request.fresh = new_fresh
Beispiel #7
0
def __count_solutions(block: Block) -> int:
    """Return the number of trial orderings for the block: the factorial of
    its trial count.
    """
    return math.factorial(block.trials_per_sample())
Beispiel #8
0
    def sample(block: Block,
               sample_count: int,
               min_search: bool = False) -> SamplingResult:
        """Sample *sample_count* solutions uniformly from the block.

        If no solutions exist and this is not already a minimum-trials search,
        binary-search for the smallest ``minimum_trials`` constraint that makes
        the block satisfiable (needed to support ``AtLeastKInARow``).

        Fixes over the previous version: the old ``progress.close(); return
        result`` after the ``while True`` loop was unreachable (and ``result``
        was undefined there); the progress bar is now closed via try/finally
        on every exit path.
        """
        backend_request = block.build_backend_request()
        if block.errors:
            for e in block.errors:
                print(e)
                # Anything other than a warning is fatal.
                if "WARNING" not in e:
                    return SamplingResult([], {})

        solutions = sample_uniform(
            sample_count, CNF(backend_request.get_cnfs_as_json()),
            backend_request.fresh - 1, block.variables_per_sample(),
            backend_request.get_requests_as_generation_requests(), False)

        if not solutions:
            from sweetpea.constraints import AtLeastKInARow
            if min_search:
                # Already inside the binary search; report failure upward.
                return SamplingResult([], {})

            max_constraints = list(
                map(
                    lambda x: cast(AtLeastKInARow, x).max_trials_required,
                    filter(lambda c: isinstance(c, AtLeastKInARow),
                           block.constraints)))

            if not max_constraints:
                return SamplingResult([], {})

            print(
                "No solution found... We require a minimum trials contraint to find a solution."
            )
            max_constraint = max(max_constraints)
            min_constraint = block.trials_per_sample() + 1
            original_min_trials = block.min_trials
            last_valid_min_contraint = max_constraint
            last_valid = SamplingResult([], {})
            progress = tqdm(total=math.ceil(
                math.log(max_constraint - min_constraint)) + 1,
                            file=sys.stdout)
            try:
                # Binary search over the minimum-trials value; the loop only
                # exits via one of the returns below.
                while True:
                    current_constraint = int(
                        (max_constraint - min_constraint + 1) /
                        2) + min_constraint
                    block.min_trials = original_min_trials
                    c = minimum_trials(current_constraint)
                    c.validate(block)
                    c.apply(block, None)
                    # NOTE(review): constraints accumulate across iterations;
                    # preserved from the original — confirm this is intended.
                    block.constraints.append(c)
                    res = UnigenSamplingStrategy.sample(
                        block, sample_count, True)
                    progress.update(1)
                    if res.samples:
                        if current_constraint <= min_constraint:
                            print(
                                "Optimal minimum trials contraint is at ",
                                current_constraint, ".")
                            return res
                        last_valid_min_contraint = current_constraint
                        last_valid = res
                        max_constraint = current_constraint - 1
                    else:
                        if max_constraint <= current_constraint:
                            print(
                                "Optimal minimum trials contraint is at ",
                                last_valid_min_contraint, ".")
                            return last_valid
                        min_constraint = current_constraint + 1
            finally:
                # Release the tqdm terminal line on every exit path (the old
                # close() call after the loop was unreachable).
                progress.close()

        result = list(
            map(lambda s: SamplingStrategy.decode(block, s.assignment),
                solutions))
        return SamplingResult(result, {})
Beispiel #9
0
    def __generate_sample(block: Block, cnf: CNF,
                          sample_metrics: dict) -> dict:
        """Build one sample trial-by-trial: enumerate candidate assignments
        for each trial, keep those satisfiable together with the trials
        committed so far, pick one at random, and commit it.

        Solver timings and counts are recorded into *sample_metrics*, which
        is mutated in place. Returns the decoded experiment dict.
        """
        sample_metrics['trials'] = []

        # Start a 'committed' list of CNFs
        committed = cast(List[And], [])

        for trial_number in range(block.trials_per_sample()):
            trial_start_time = time()

            trial_metrics = {'t': trial_number + 1, 'solver_calls': []}
            solver_calls = cast(List[dict], trial_metrics['solver_calls'])

            #  Get the variable list for this trial.
            variables = block.variable_list_for_trial(trial_number + 1)
            variables = list(filter(lambda i: i != [], variables))
            # Cartesian product of per-factor variable choices = candidate trials.
            potential_trials = list(map(list, product(*variables)))

            trial_metrics['potential_trials'] = len(potential_trials)

            # Use env var to switch between filtering and not
            if GuidedSamplingStrategy.__prefilter_enabled():
                # Flatten the list
                flat_vars = list(chain(*variables))

                # Check SAT for each one: a variable that is individually
                # unsatisfiable with the committed prefix rules out every
                # candidate trial containing it.
                unsat = []
                for v in flat_vars:
                    t_start = time()
                    full_cnf = cnf + CNF(cnf_to_json(committed)) + CNF(
                        cnf_to_json([And([v])]))
                    allowed = cnf_is_satisfiable(full_cnf)
                    duration_seconds = time() - t_start
                    solver_calls.append({
                        'time': duration_seconds,
                        'SAT': allowed
                    })
                    if not allowed:
                        unsat.append(v)

                # TODO: Count filtering SAT calls separately?

                # Filter out any potential trials with those vars set
                filtered_pts = []
                for pt in potential_trials:
                    if any(uv in pt for uv in unsat):
                        continue
                    else:
                        filtered_pts.append(pt)

                # Record the number filterd out for metrics
                trial_metrics['prefiltered_out'] = len(potential_trials) - len(
                    filtered_pts)
                potential_trials = filtered_pts

            # One SAT call per surviving candidate trial.
            allowed_trials = []
            for potential_trial in potential_trials:
                start_time = time()
                full_cnf = cnf + CNF(cnf_to_json(committed)) + CNF(
                    cnf_to_json([And(potential_trial)]))
                allowed = cnf_is_satisfiable(full_cnf)
                duration_seconds = time() - start_time

                solver_calls.append({'time': duration_seconds, 'SAT': allowed})

                if allowed:
                    allowed_trials.append(potential_trial)

            trial_metrics['allowed_trials'] = len(allowed_trials)
            trial_metrics['solver_call_count'] = len(solver_calls)
            sample_metrics['trials'].append(trial_metrics)

            # Randomly sample a single trial from the uniform distribution of the allowed trials,
            # and commit that trial to the committed sequence.
            # NOTE(review): raises ValueError if allowed_trials is empty —
            # confirm callers guarantee at least one satisfiable trial.
            trial_idx = np.random.randint(0, len(allowed_trials))
            committed.append(And(allowed_trials[trial_idx]))

            trial_metrics['time'] = time() - trial_start_time

        # Aggregate the total solver calls
        sample_metrics['solver_call_count'] = 0
        for tm in sample_metrics['trials']:
            sample_metrics['solver_call_count'] += tm['solver_call_count']

        # Flatten the committed trials into a list of integers and decode it.
        solution = GuidedSamplingStrategy.__committed_to_solution(committed)
        return SamplingStrategy.decode(block, solution)
Beispiel #10
0
def __generate_encoding_diagram(blk: Block) -> str:
    """Render an ASCII table showing, for each trial and factor level, the SAT
    variable number that encodes it (blank where the factor does not apply).
    """
    diagram_str = ""

    design_size = blk.variables_per_trial()
    num_trials = blk.trials_per_sample()
    num_vars = blk.variables_per_sample()

    # Widest variable number determines the minimum column width.
    largest_number_len = len(str(num_vars))

    header_widths = []
    row_format_str = '| {:>7} |'
    for f in blk.design:
        # length of all levels concatenated for this factor
        level_names = list(map(get_external_level_name, f.levels))
        level_name_widths = [
            max(largest_number_len, l) for l in list(map(len, level_names))
        ]

        level_names_width = sum(level_name_widths) + len(
            level_names) - 1  # Extra length for spaces in between names.
        factor_header_width = max(len(f.factor_name), level_names_width)
        header_widths.append(factor_header_width)

        # If the header is longer than the level widths combined, then they need to be lengthened.
        # Distribute the extra width round-robin across the level columns.
        diff = factor_header_width - level_names_width
        if diff > 0:
            idx = 0
            while diff > 0:
                level_name_widths[idx] += 1
                idx += 1
                diff -= 1
                if idx >= len(level_name_widths):
                    idx = 0

        # While we're here, build up the row format str.
        row_format_str = reduce(lambda a, b: a + ' {{:^{}}}'.format(b),
                                level_name_widths, row_format_str)
        row_format_str += ' |'

    header_format_str = reduce(lambda a, b: a + ' {{:^{}}} |'.format(b),
                               header_widths, '| {:>7} |')
    factor_names = list(map(lambda f: f.factor_name, blk.design))
    header_str = header_format_str.format(*["Trial"] + factor_names)
    row_width = len(header_str)

    # First line
    diagram_str += ('-' * row_width) + '\n'

    # Header
    diagram_str += header_str + '\n'

    # Level names
    all_level_names = [
        ln for (fn, ln) in get_all_external_level_names(blk.design)
    ]
    diagram_str += row_format_str.format(*['#'] + all_level_names) + '\n'

    # Separator
    diagram_str += ('-' * row_width) + '\n'

    # Variables
    for t in range(num_trials):
        args = [str(t + 1)]
        for f in blk.design:
            if f.applies_to_trial(t + 1):
                variables = [
                    blk.first_variable_for_level(f, l) + 1 for l in f.levels
                ]
                # NOTE(review): has_complex_window is *called* here but used
                # as a plain attribute in other snippets of this file — one of
                # the two usages is likely wrong; confirm against the Factor
                # class definition.
                if f.has_complex_window():

                    # Total window width, accumulated recursively through
                    # nested derived factors.
                    def acc_width(w) -> int:
                        return w.width + (
                            acc_width(w.args[0].levels[0].window) -
                            1 if w.args[0].has_complex_window() else 0)

                    width = acc_width(f.levels[0].window)
                    stride = f.levels[0].window.stride
                    # Skip (stride - 1) trials' worth of offset per stride step.
                    stride_offset = (stride - 1) * int(t / stride)
                    offset = t - width + 1 - stride_offset
                    variables = list(
                        map(lambda n: n + len(variables) * offset, variables))
                else:
                    variables = list(
                        map(lambda n: n + design_size * t, variables))

                args += list(map(str, variables))
            else:
                # Factor inactive on this trial: emit blanks for its levels.
                args += list(repeat('', len(f.levels)))

        diagram_str += row_format_str.format(*args) + '\n'

    # Footer
    diagram_str += ('-' * row_width) + '\n'
    return diagram_str
Beispiel #11
0
    def sample(block: Block,
               sample_count: int,
               min_search: bool = False) -> SamplingResult:
        """Sample *sample_count* solutions uniformly from the block.

        If no solutions exist and this is not already a minimum-trials search,
        binary-search for the smallest ``minimum_trials`` constraint that makes
        the block satisfiable (needed to support ``AtLeastKInARow``).

        Fixes over the previous version: the old ``progress.close(); return
        result`` after the ``while True`` loop was unreachable (and ``result``
        was undefined there); the ``cast(List[...], filter(...))`` mislabeled
        a filter iterator as a list and is now materialized; the progress bar
        is closed via try/finally on every exit path.
        """
        backend_request = block.build_backend_request()
        if block.errors:
            for e in block.errors:
                print(e)
                # Anything other than a warning is fatal.
                if "WARNING" not in e:
                    return SamplingResult([], {})

        solutions = sample_uniform(
            sample_count, CNF(backend_request.get_cnfs_as_json()),
            backend_request.fresh - 1, block.variables_per_sample(),
            backend_request.get_requests_as_generation_requests(), False)

        # This section deals with the problem caused by a corner case created
        # by at_least_k_in_a_row_constraint. I.e. in some cases this constraint
        # requires the support of a minimum_trials constraint to find valid
        # solutions. This will find the optimal minimum trials constraint for
        # the user using binary search with trial and error.
        if not solutions:
            from sweetpea.constraints import AtLeastKInARow
            if min_search:
                # Already inside the binary search; report failure upward.
                return SamplingResult([], {})

            # Materialize the filter so the value actually matches the cast.
            atleast_constraints = cast(
                List[AtLeastKInARow],
                list(filter(lambda c: isinstance(c, AtLeastKInARow),
                            block.constraints)))
            max_constraints = [x.max_trials_required
                               for x in atleast_constraints]

            if not max_constraints:
                return SamplingResult([], {})

            print(
                "No solution found... We require a minimum trials contraint to find a solution."
            )
            max_constraint = max(max_constraints)
            min_constraint = block.trials_per_sample() + 1
            original_min_trials = block.min_trials
            last_valid_min_contraint = max_constraint
            last_valid = SamplingResult([], {})
            progress = tqdm(
                total=ceil(log(max_constraint - min_constraint)) + 1,
                file=sys.stdout)
            try:
                # Binary search over the minimum-trials value; the loop only
                # exits via one of the returns below.
                while True:
                    current_constraint = int(
                        (max_constraint - min_constraint + 1) /
                        2) + min_constraint
                    block.min_trials = original_min_trials
                    c = minimum_trials(current_constraint)
                    c.validate(block)
                    c.apply(block, None)
                    # NOTE(review): constraints accumulate across iterations;
                    # preserved from the original — confirm this is intended.
                    block.constraints.append(c)
                    res = UnigenSamplingStrategy.sample(
                        block, sample_count, True)
                    progress.update(1)
                    if res.samples:
                        if current_constraint <= min_constraint:
                            print(
                                "Optimal minimum trials contraint is at ",
                                current_constraint, ".")
                            return res
                        last_valid_min_contraint = current_constraint
                        last_valid = res
                        max_constraint = current_constraint - 1
                    else:
                        if max_constraint <= current_constraint:
                            print(
                                "Optimal minimum trials contraint is at ",
                                last_valid_min_contraint, ".")
                            return last_valid
                        min_constraint = current_constraint + 1
            finally:
                # Release the tqdm terminal line on every exit path (the old
                # close() call after the loop was unreachable).
                progress.close()

        result = list(
            map(lambda s: SamplingStrategy.decode(block, s.assignment),
                solutions))
        return SamplingResult(result, {})