Example #1
def main_neighbor(args, mp_n):
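    """Benchmark SynthPrefGen runs in parallel for one agent configuration.

    Builds a shell command from the parsed configuration, writes a label
    for the run to the output file, then dispatches 25 identical runs
    across mp_n worker processes, with each run's output redirected to
    timing.dat.
    """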
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree
    ]
    config = parse_configuration(args.config[0])
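    # Truncate the output file so this run's results start fresh.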
    with open(args.output[0], 'w') as fout:
        fout.write('')
    call = "python3 SynthPrefGen.py -o " + args.output[0] + " "
    prob = list(map(lambda x: str(x), args.problem))
    call += '-p ' + ' '.join(prob) + ' '
    if len(args.learn_conf) == 1:
        call += '-i ' + args.learn_conf[0] + ' '
    call += args.config[0] + " >> timing.dat"
    runs = 25
    label = ''
    for holder in config[1]:
        label += pill_label(agent_types, holder, config[0])
    label += ';' + str(config[1][0].size)
    with open(args.output[0], 'a') as fout:
        fout.write('(' + label + ')')
    start = time.time()
    pool = mp.Pool(mp_n)
    pool.map(sys_call_wait, [call for i in range(runs)])
    # Release the worker processes once all runs have completed.
    pool.close()
    pool.join()
    with open(args.output[0], 'a') as fout:
        fout.write("\n")
    print("Full Time:", time.time() - start)
    return 0
Example #2
def main(args):
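    """Build the configured agents and write their combined example set.

    Optionally serializes each agent's model under args.agent_folder, then
    writes the problem configuration followed by one example per line to
    the output file.
    """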
    print(args)
    config = parse_configuration(args.config[0])
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]
    agents = []
    # Build agents.
    for agent in config[1]:
        agents.append(make_agent(agent, agent_types, config[0]))
    # Write agents, if applicable.
    if args.agent_folder is not None:
        if not os.path.isdir(args.agent_folder[0]):
            os.mkdir(args.agent_folder[0])
        for agent in agents:
            a_file = os.path.join(args.agent_folder[0],
                                  "agent" + str(agent[0].id) + ".pref")
            with open(a_file, 'w') as fout:
                fout.write(str(agent[0].model))
    # Build example set.
    ex_set = ExampleSet()
    for agent in agents:
        temp_set = build_example_set(agent[0], agent[1], config[0])
        ex_set.add_example_list(temp_set.example_list())
    # Write example set to file.
    with open(args.output[0], 'w') as fout:
        fout.write(str(config[0]) + "\n")
        for example in ex_set.example_list():
            fout.write(str(example) + "\n")
    return
Example #3
def main(args, mp_n):
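    """Benchmark SynthPrefGen runs separately for each configured agent.

    Writes each agent to temp_agent.config, builds the matching shell
    command, and dispatches 25 runs per agent across mp_n worker
    processes, with each run's output redirected to timing.dat.
    """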
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree
    ]
    config = parse_configuration(args.config[0])
    with open(args.output[0], 'w') as fout:
        fout.write('')
    for holder in config[1]:
        with open('temp_agent.config', 'w') as fout:
            fout.write(str(config[0]) + "\n")
            fout.write(str(holder))
        call = "python3 SynthPrefGen.py -l " + str(args.layers[0]) + ' '
        prob = list(map(lambda x: str(x), args.problem))
        call += '-p ' + ' '.join(prob) + ' '
        if len(args.learn_conf) == 1:
            call += '-i ' + args.learn_conf[0] + ' '
        call += "-o " + args.output[0] + " "
        call += "temp_agent.config >> timing.dat"
        runs = 25
        print(call)
        with open(args.output[0], 'a') as fout:
            fout.write('(' + pill_label(agent_types, holder, config[0]) + ';' +
                       str(holder.size) + ')')
        pool = mp.Pool(mp_n)
        pool.map(sys_call_wait, [call for i in range(runs)])
        # Close each per-agent pool so worker processes do not accumulate
        # across loop iterations.
        pool.close()
        pool.join()
        with open(args.output[0], 'a') as fout:
            fout.write("\n")
    return 0
Example #4
def main_hillclimb_rr(args):
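    """Random-restart hillclimbing over randomly initialized learners.

    Performs 400 restarts of SA.hillclimb against the joint example set
    under utilitarian evaluation, recording the best score seen so far at
    every 100th restart and appending the series to the output file.
    """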
    config = parse_configuration(args.config[0])
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]

    info = {}
    l_class = None
    if len(args.learn_conf) == 1:
        l_config = parse_configuration(args.learn_conf[0])
        info = l_config[1][0].info
        for agent_type in agent_types:
            if l_config[1][0].type.lower() == agent_type.string_id().lower():
                l_class = agent_type
    else:
        info['clauses'] = 1
        info['literals'] = 1
        info['ranks'] = 5
        l_class = RankingPrefFormula

    runs = 400
    max_eval = 0
    start = time()
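    # stats[0] doubles as the reporting interval: the running best score
    # is appended every stats[0] (here 100) restarts.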
    stats = [100]
    agents = []
    for holder in config[1]:
        agents.append(make_agent(holder, agent_types, config[0]))
    ex_set = build_example_set_multi(agents, config[0])
    for i in range(runs):
        learner = l_class.random(config[0], info)
        learner = SA.hillclimb(learner, ex_set, SA.evaluate_util)
        # learner = SA.hillclimb(learner, ex_set, SA.evaluate_maximin)
        score = SA.evaluate_util(learner, ex_set)
        # score = SA.evaluate_maximin(learner, ex_set)
        if score > max_eval:
            max_eval = score
        if (i + 1) % (stats[0]) == 0:
            stats.append(max_eval)
    stats = list(map(lambda x: str(x), stats))
    with open(args.output[0], 'a') as fout:
        fout.write(',(' + ';'.join(stats) + ')')
    print("Time:", time() - start)
Example #5
def main_build_neighbor_monte_carlo(args):
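    """Monte Carlo sample of hillclimbing maxima under maximin evaluation.

    Runs 250 random restarts of SA.hillclimb, collects each resulting
    learner's maximin score, and appends (runs; min; mean; max) to the
    output file.
    """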
    config = parse_configuration(args.config[0])
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]

    info = {}
    l_class = None
    if len(args.learn_conf) == 1:
        l_config = parse_configuration(args.learn_conf[0])
        info = l_config[1][0].info
        for agent_type in agent_types:
            if l_config[1][0].type.lower() == agent_type.string_id().lower():
                l_class = agent_type
    else:
        info['clauses'] = 1
        info['literals'] = 1
        info['ranks'] = 5
        l_class = RankingPrefFormula

    results = []
    runs = 250
    start = time()
    agents = []
    for holder in config[1]:
        agents.append(make_agent(holder, agent_types, config[0]))
    ex_set = build_example_set_multi(agents, config[0])
    for i in range(runs):
        learner = l_class.random(config[0], info)
        # learner = SA.hillclimb(learner, ex_set, SA.evaluate_util)
        learner = SA.hillclimb(learner, ex_set, SA.evaluate_maximin)
        # results.append(SA.evaluate_util(learner, ex_set))
        results.append(SA.evaluate_maximin(learner, ex_set))
    average_maxima = sum(results) / len(results)
    stats = [runs, min(results), average_maxima, max(results)]
    stats = list(map(lambda x: str(x), stats))
    with open(args.output[0], 'a') as fout:
        fout.write(',(' + ';'.join(stats) + ')')
    print("Time:", time() - start)
Example #6
def main_learn_joint_SA_mm(args):
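    """Learn a joint preference model for all agents via learn_SA_mm.

    Builds the joint example set and, for each of five cross-validation
    folds, trains a randomly initialized learner and appends training and
    validation evaluations to the output file.
    """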
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]
    config = parse_configuration(args.config[0])
    info = {}
    l_class = None
    if len(args.learn_conf) == 1:
        l_config = parse_configuration(args.learn_conf[0])
        info = l_config[1][0].info
        for agent_type in agent_types:
            if l_config[1][0].type.lower() == agent_type.string_id().lower():
                l_class = agent_type
    else:
        info['clauses'] = 1
        info['literals'] = 1
        info['ranks'] = 5
        l_class = RankingPrefFormula

    agents = []
    for holder in config[1]:
        agents.append(make_agent(holder, agent_types, config[0]))
    ex_set = build_example_set_multi(agents, config[0])
    for train, valid in ex_set.crossvalidation(5):
        start = time()
        learner = l_class.random(config[0], info)
        # learner = LPM.random(config[0], info)
        learner = learn_SA_mm(learner, train)
        print(time() - start)
        training = evaluate_multi(train, learner)
        validation = evaluate_multi(valid, learner)
        training = ';'.join(list(map(lambda x: str(x), training)))
        validation = ';'.join(list(map(lambda x: str(x), validation)))
        temp = ';'.join([training, validation])
        with open(args.output[0], 'a') as fout:
            fout.write(',(' + temp + ')')
        del temp
        del learner
        del train
        del valid
    del ex_set
Example #7
def main_learn_SA(args):
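    """Learn each agent's preferences individually via learn_SA.

    For every configured agent, builds its example set, then runs 5-fold
    cross-validation from a randomly initialized learner and records
    training score, validation score, and example proportions.
    """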
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]
    config = parse_configuration(args.config[0])
    info = {}
    l_class = None
    if len(args.learn_conf) == 1:
        l_config = parse_configuration(args.learn_conf[0])
        info = l_config[1][0].info
        for agent_type in agent_types:
            if l_config[1][0].type.lower() == agent_type.string_id().lower():
                l_class = agent_type
    else:
        info['clauses'] = 1
        info['literals'] = 1
        info['ranks'] = 5
        l_class = RankingPrefFormula

    for holder in config[1]:
        agent = make_agent(holder, agent_types, config[0])
        ex_set = build_example_set(agent[0], agent[1], config[0])
        proportion = ex_proport(ex_set)
        proportion = list(map(lambda x: str(x), proportion))
        proportion = ';'.join(proportion)
        for train, valid in ex_set.crossvalidation(5):
            start = time()
            learner = l_class.random(config[0], info)
            # learner = RankingPrefFormula.random(config[0],info)
            # learner = LPM.random(config[0], info)
            learner = learn_SA(learner, train)
            print(time() - start)
            training = evaluate_rep(train, learner)
            validation = evaluate_rep(valid, learner)
            temp = ';'.join([str(training), str(validation), proportion])
            with open(args.output[0], 'a') as fout:
                fout.write(',(' + temp + ')')
Example #8
def main_build_neighbor(args):
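    """Exhaustively map the neighbor graph of RankingPrefFormula space.

    Scores every formula against the joint example set under maximin
    evaluation, appends local-maxima statistics to the output file, and
    prints a summary of the resulting search landscape.
    """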
    config = parse_configuration(args.config[0])
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]
    info = {}
    info['clauses'] = 1
    info['literals'] = 1
    info['ranks'] = 3
    agents = []
    for holder in config[1]:
        agents.append(make_agent(holder, agent_types, config[0]))
    ex_set = build_example_set_multi(agents, config[0])
    n_graph = NeighborGraph()
    agents = list(map(lambda x: x[0], agents))
    start = time()
    for rpf in RankingPrefFormula.each(config[0], info):
        # score = evaluate_rep_full_multi(agents, rpf, config[0])
        # score = evaluate_rep_full_maximin(agents, rpf, config[0])
        # score = evaluate_rep(ex_set, rpf)
        score = SA.evaluate_maximin(rpf, ex_set)
        n_graph.add_node(rpf.node_str(), score)
        for neighbor in rpf.neighbors():
            n_graph.add_arc(rpf.node_str(), neighbor.node_str())
    maxima = n_graph.local_maxima()
    average_maxima = 0.0
    min_maxima = None
    for node in maxima:
        average_maxima += n_graph.get(node)
        if min_maxima is None or n_graph.get(node) < min_maxima:
            min_maxima = n_graph.get(node)
    average_maxima = average_maxima / (len(maxima))
    stats = [len(maxima), min_maxima, average_maxima, n_graph.global_maxima()]
    stats = list(map(lambda x: str(x), stats))
    with open(args.output[0], 'a') as fout:
        fout.write(',(' + ';'.join(stats) + ')')
    print("Time:", time() - start)
    print("Local Minima Count:", len(n_graph.local_minima()))
    print("Local Maxima Count:", len(maxima))
    print("Minimum Local Maxima:", min_maxima)
    print("Average Local Maxima Value:", average_maxima)
    print("Maximum:", n_graph.global_maxima())
    print("Minimum:", n_graph.global_minima())
    print("Average:", n_graph.average())
    print("Number of Nodes:", len(n_graph))
Example #9
def main_learn_joint_nn(args):
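    """Train a neural preference model on the joint example set.

    For each of five cross-validation folds, trains on the available
    device (CUDA when present) via train_neural_preferences and appends
    training and validation evaluations to the output file.
    """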
    config = parse_configuration(args.config[0])
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]
    info = {}
    agents = []
    # with open(args.output[0],'w') as fout:
    #     fout.write('')
    learn_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
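    # Keep the first args.layers[0] hidden layers (width 256 each); a
    # non-positive value leaves no hidden layers.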
    layers = [256, 256, 256]
    layer_cut = max(0, args.layers[0])
    layers = layers[:layer_cut]
    for holder in config[1]:
        agents.append(make_agent(holder, agent_types, config[0]))
    ex_set = build_example_set_multi(agents, config[0])
    for train, valid in ex_set.crossvalidation(5):
        # train.to_tensors(learn_device)
        # valid.to_tensors(learn_device)
        start = time()
        learner = train_neural_preferences(train, layers, 1000, config[0],
                                           learn_device)
        # learner.to(eval_device)
        learner.eval()
        training = evaluate_cuda_multi(train, learner, learn_device)
        validation = evaluate_cuda_multi(valid, learner, learn_device)
        print(time() - start)
        # pills.append('(' + str(training) + ';' + str(validation) + ')')
        training = ';'.join(list(map(lambda x: str(x), training)))
        validation = ';'.join(list(map(lambda x: str(x), validation)))
        temp = ';'.join([training, validation])
        with open(args.output[0], 'a') as fout:
            fout.write(',(' + temp + ')')
        torch.cuda.empty_cache()
        del temp
        del learner
        del train
        del valid
    del ex_set
Example #10
def main_learn_lpm_full(args):
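    """Greedily learn an LPM for each agent and evaluate it in full.

    Runs 5-fold cross-validation with LPM.learn_greedy, recording the
    training, validation, and full-agent (evaluate_rep_full) scores to
    the output file.
    """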
    config = parse_configuration(args.config[0])
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]
    for holder in config[1]:
        agent = make_agent(holder, agent_types, config[0])
        ex_set = build_example_set(agent[0], agent[1], config[0])
        for train, valid in ex_set.crossvalidation(5):
            start = time()
            learner = LPM.learn_greedy(train, config[0])
            print(time() - start)
            training = evaluate_rep(train, learner)
            validation = evaluate_rep(valid, learner)
            full_eval = evaluate_rep_full(agent[0], learner, config[0])
            temp = ';'.join([str(training), str(validation), str(full_eval)])
            with open(args.output[0], 'a') as fout:
                fout.write(',(' + temp + ')')
Example #11
def main_learn_nn(args):
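    """Train a neural preference model separately for each agent.

    Selects the available device (CUDA when present), truncates the
    hidden-layer list to args.layers[0] entries, and delegates each
    agent's run to single_run.
    """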
    config = parse_configuration(args.config[0])
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]
    agents = []
    # with open(args.output[0],'w') as fout:
    #     fout.write('')
    learn_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
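    # Keep the first args.layers[0] hidden layers (width 256 each); a
    # non-positive value leaves no hidden layers.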
    layers = [256, 256, 256]
    layer_cut = max(0, args.layers[0])
    layers = layers[:layer_cut]
    for holder in config[1]:
        single_run(args, holder, agent_types, config, layers, learn_device)
Example #12
def main_learn_joint_lpm_mm(args):
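    """Learn a joint maximin LPM over all configured agents.

    For each of five cross-validation folds, fits LPM.learn_greedy_maximin
    and appends training and validation evaluations to the output file.
    """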
    config = parse_configuration(args.config[0])
    agent_types = [
        LPM, RankingPrefFormula, PenaltyLogic, WeightedAverage, CPnet, CLPM,
        LPTree, ASO
    ]
    agents = []
    for holder in config[1]:
        agents.append(make_agent(holder, agent_types, config[0]))
    ex_set = build_example_set_multi(agents, config[0])
    for train, valid in ex_set.crossvalidation(5):
        start = time()
        learner = LPM.learn_greedy_maximin(train, config[0])
        print(time() - start)
        training = evaluate_multi(train, learner)
        validation = evaluate_multi(valid, learner)
        training = ';'.join(list(map(lambda x: str(x), training)))
        validation = ';'.join(list(map(lambda x: str(x), validation)))
        temp = ';'.join([training, validation])
        with open(args.output[0], 'a') as fout:
            fout.write(',(' + temp + ')')