Code example #1
import matplotlib as mpl

# Import paths follow example #3 below; Domain's location is assumed.
from pywmi import Domain
from pywmi.plot import plot_data
from pywmi.sample import uniform
from pywmi.smt_check import evaluate

def _test_plot_data():
    # Domain with one boolean variable "a" and two reals "x", "y" in [0, 1]
    domain = Domain.make(["a"], ["x", "y"], [(0, 1), (0, 1)])
    a, x, y = domain.get_symbols(["a", "x", "y"])
    formula = a | (~a & (x <= y))
    data = uniform(domain, 100)               # 100 uniform samples from the domain
    labels = evaluate(domain, formula, data)  # one boolean label per sample
    mpl.use('Agg')                            # headless backend, safe for test runs
    plot_data(None, domain, (data, labels))
    assert True
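A quick way to exercise this test outside a test runner, as a small illustrative sketch (not part of the original file):

if __name__ == '__main__':
    _test_plot_data()
    print("plot_data ran without raising")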
Code example #2
File: core.py  Project: mboehme/learn2fix
def select_active(
        self, domain, data, labels, formula,
        active_indices) -> Tuple[np.ndarray, np.ndarray, List[int]]:
    """Return the data, the labels, and the sorted indices of examples that
    the learned formula still misclassifies and that are not yet active."""
    active_set = set(active_indices)
    learned_labels = evaluate(domain, formula, data)
    differences = np.logical_xor(labels, learned_labels)
    difference_set = set(np.where(differences)[0])
    # print(active_set)
    # print(difference_set)
    # print(pretty_print(formula))
    # for i in active_set & difference_set:
    #     print(i)
    #     print(pretty_print_instance(domain, data[i]))
    #     print(labels[i], learned_labels[i])
    # print()
    # assert len(active_set & difference_set) == 0
    return data, labels, sorted(difference_set - active_set)
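The core of select_active is plain set arithmetic over label arrays: the points the current formula misclassifies, minus the points already in the active set, become the next candidates. A self-contained numpy-only illustration of that contract (toy data, no smtlearn dependency):

import numpy as np

labels = np.array([True, True, False, False, True])          # ground truth
learned_labels = np.array([True, False, False, True, True])  # formula's predictions
active_indices = [0, 1]                                      # already-used examples

differences = np.logical_xor(labels, learned_labels)  # misclassified at indices 1 and 3
difference_set = set(np.where(differences)[0])
print(sorted(map(int, difference_set - set(active_indices))))  # -> [3]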
Code example #3
import random

import numpy as np

from pywmi.plot import plot_data, plot_formula
from pywmi.sample import uniform
from pywmi.smt_check import evaluate
from pywmi.smt_print import pretty_print
from smtlearn.examples import ice_cream_problem
from smtlearn.k_cnf_smt_learner import KCnfSmtLearner
from smtlearn.violations.core import RandomViolationsStrategy

random.seed(666)
np.random.seed(666)

domain, formula, name = ice_cream_problem()
# plot_formula(None, domain, formula)

data = uniform(domain, 100)
labels = evaluate(domain, formula, data)

learner = KCnfSmtLearner(3, 3, RandomViolationsStrategy(10))  # k-CNF(SMT) learner; strategy samples violating examples at random
initial_indices = random.sample(range(data.shape[0]), 20)     # start from 20 random examples

learned_theory = learner.learn(domain, data, labels, initial_indices)
print(pretty_print(learned_theory))
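A natural follow-up, continuing the script above, is to measure how closely the learned theory agrees with the ground-truth formula on fresh samples (an illustrative check, not part of the original script):

test_data = uniform(domain, 1000)
true_labels = evaluate(domain, formula, test_data)
learned_labels = evaluate(domain, learned_theory, test_data)
print("agreement on held-out samples:", np.mean(true_labels == learned_labels))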
Code example #4
mutated_failing = []
mutated_failing.append(trainer_test_suite[0])  # seed with the first (failing) test case

oracle_committee = []

timeout = time.time() + 60 * timeout  # convert timeout (minutes) into an absolute deadline
if debug: print("[INFO] Start learning (iteration: " + str(iteration) + ")...")

while time.time() < timeout and n_human_labeled < max_labels:
    # 50/50: mutate a previously failing input or the original failing test
    if np.random.randint(0, 100) < 50:
        seed_input = mutated_failing[np.random.randint(len(mutated_failing))][:-1]
    else:
        seed_input = trainer_test_suite[0][:-1]
    fuzzed_test_case = getMutatedTestCase(trainer_test_suite, seed_input)
    predict_label = evaluate(domain, learned_model, np.array(fuzzed_test_case))
    n_generated += 1
    if not ask_human(fuzzed_test_case[:-1], bug_dir, bug_prog, gold_prog):
        n_failing += 1

    # fraction of training labels that are failing (label False)
    all_fail_prob = (trainer_labels.size - np.sum(trainer_labels)) / trainer_labels.size

    # If the current test input is predicted as failing,
    # OR if we have mostly labeled failing test cases and it is predicted as passing
    if not predict_label or (trainer_labels.size > 10 and all_fail_prob > 0.9 and predict_label):
        # ask human to label, and
        human_label = ask_human(fuzzed_test_case[:-1], bug_dir, bug_prog, gold_prog)
        # add to trainer_test_suite
        trainer_test_suite = np.append(trainer_test_suite, [fuzzed_test_case], axis=0)
        trainer_labels = np.append(trainer_labels, [human_label], axis=0)
        # re-train automated oracle
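        # NOTE: the excerpt is truncated here. What follows is a HYPOTHETICAL
        # sketch, not learn2fix code: it assumes 'learner' follows the
        # KCnfSmtLearner-style learn(domain, data, labels, indices) API from
        # example #3 and that the last column of trainer_test_suite holds the
        # label (matching the [:-1] slices above).
        features = trainer_test_suite[:, :-1]
        active = list(range(len(trainer_labels)))  # treat every labelled point as active
        learned_model = learner.learn(domain, features, trainer_labels, active)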