Example #1
def generate_bulk_scenarios(the_conn, cursor):
    # Test a number of issues...
    for the_num_issues in range(2, 6):
        # In some domain size...
        for the_domain_size in range(2, 10):
            # For some number of hyper-rectangles...
            for the_num_constraints in range(2, 10):
                # Count how many scenarios already exist for this parameter combination
                cursor.execute(
                    f"SELECT COUNT(*) FROM scenarios WHERE num_issues = {the_num_issues} "
                    f"AND domain_size = {the_domain_size} AND num_constraints = {the_num_constraints}")
                data = cursor.fetchall()
                # Repeat the experiments a few times... (only as necessary)
                for t in range(max(0, 10 - data[0][0])):
                    # Generate a uniform random scenario
                    scenario_id = scenario_factory.generate_uniform_random_scenario(
                        the_num_issues, the_domain_size, the_num_constraints)
                    # Print something so we know that there is progress
                    print(f"t = {t}, num_issues = {the_num_issues}, domain_size = {the_domain_size}, "
                          f"num_constraints = {the_num_constraints}. Id = {scenario_id}")
                    # Record the newly generated scenario in the database
                    cursor.execute(
                        "INSERT INTO scenarios (xml_file, num_issues, domain_size, num_constraints) "
                        f"VALUES ('{scenario_id}', {the_num_issues}, {the_domain_size}, {the_num_constraints})")
                    the_conn.commit()
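
A minimal usage sketch for the function above, assuming an SQLite backend and a hypothetical scenarios table; the real project sets up the database connection, schema, and the scenario_factory module elsewhere:

import sqlite3

conn = sqlite3.connect("scenarios.db")
cursor = conn.cursor()
# Hypothetical schema matching the columns used in the INSERT statement above.
cursor.execute("CREATE TABLE IF NOT EXISTS scenarios ("
               "xml_file TEXT, num_issues INTEGER, domain_size INTEGER, num_constraints INTEGER)")
generate_bulk_scenarios(conn, cursor)
conn.close()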
Example #2
# To create a polynomial kernel we need the following imports.
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import Exponentiation
from skopt.learning.gaussian_process.kernels import DotProduct
from skopt.learning.gaussian_process.kernels import Sum
from skopt.learning.gaussian_process.kernels import Product
from skopt.learning.gaussian_process.kernels import ConstantKernel
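
# A minimal sketch of how these primitives compose into a polynomial kernel,
# (c + x . x')^d, i.e. Exponentiation(Sum(ConstantKernel, DotProduct), d).
# The constant value and the degree below are illustrative assumptions,
# not values taken from this example.
example_poly_kernel = Exponentiation(Sum(ConstantKernel(1.0), DotProduct()), 2)
example_gpr = GaussianProcessRegressor(kernel=example_poly_kernel)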

random_scenario = False

if random_scenario:
    # Generate random scenario
    num_issues = 2
    domain_size = 5
    num_constraints = 8
    scenario_id = generate_uniform_random_scenario(num_issues, domain_size,
                                                   num_constraints)
else:
    # Read scenario
    scenario_id = '17D8316EE9'
    num_issues = 1
    domain_size = 10
    num_constraints = 5

print('Bayesian mediator social for scenario id = ', scenario_id)
scenario = ET.parse(
    util.get_scenario_path(num_issues, domain_size, num_constraints) +
    scenario_id + '.xml')
u_funcs = xml.get_ufuns(scenario)

# TODO: the data structure for bounds is inconsistent across the different mediators. It should be the same everywhere.
lower_bounds = [