Example #1
def get_random_gp_data(space_dim, num_is, num_data_each_is, kernel_name):
    """ Generate random gp data
    :param space_dim:
    :param num_is:
    :param num_data_each_is:
    :param kernel_name: currently it's either 'mix_exp' or 'prod_ker'
    :return:
    """
    sample_var = 0.01
    if kernel_name == "mix_exp":
        hyper_params = numpy.random.uniform(size=(num_is + 1) *
                                            (space_dim + 1))
        cov = MixedSquareExponential(hyper_params, space_dim + 1, num_is)
    elif kernel_name == "prod_ker":
        hyper_params = numpy.random.uniform(size=(num_is + 1) *
                                            (num_is + 2) // 2 + space_dim + 1)
        cov = ProductKernel(hyper_params, space_dim + 1, num_is + 1)
    else:
        raise NotImplementedError("invalid kernel")
    python_search_domain = pythonTensorProductDomain([
        ClosedInterval(bound[0], bound[1])
        for bound in numpy.repeat([[-10., 10.]], space_dim + 1, axis=0)
    ])
    data = HistoricalData(space_dim + 1)
    init_pts = python_search_domain.generate_uniform_random_points_in_domain(2)
    init_pts[:, 0] = numpy.zeros(2)
    data.append_historical_data(init_pts, numpy.zeros(2),
                                numpy.ones(2) * sample_var)
    gp = GaussianProcess(cov, data)
    points = python_search_domain.generate_uniform_random_points_in_domain(
        num_data_each_is)
    for pt in points:
        for i in range(num_is):
            pt[0] = i
            val = gp.sample_point_from_gp(pt, sample_var)
            data.append_sample_points([
                [pt, val, sample_var],
            ])
            gp = GaussianProcess(cov, data)
    return hyper_params, data
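
# A minimal usage sketch of the helper above. The argument values are purely
# illustrative, and the sketch assumes the objects used by the function
# (numpy, the kernels, pythonTensorProductDomain, ClosedInterval,
# HistoricalData, GaussianProcess) are already imported.
hyper_params, data = get_random_gp_data(space_dim=2, num_is=3,
                                        num_data_each_is=10,
                                        kernel_name="mix_exp")
# 2 initial points plus num_data_each_is samples per information source
print(len(data.points_sampled))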
Example #2
def generate_data(self, num_data):
    python_search_domain = pythonTensorProductDomain([
        ClosedInterval(bound[0], bound[1])
        for bound in self._info_dict['search_domain']
    ])
    data = HistoricalData(self._info_dict['dim'])
    init_pts = python_search_domain.generate_uniform_random_points_in_domain(2)
    init_pts[:, 0] = numpy.zeros(2)
    data.append_historical_data(init_pts, numpy.zeros(2),
                                numpy.ones(2) * self._sample_var_1)
    gp = GaussianProcess(self._cov, data)
    points = python_search_domain.generate_uniform_random_points_in_domain(
        num_data)
    for pt in points:
        pt[0] = numpy.ceil(numpy.random.uniform(high=2.0, size=1))
        sample_var = self._sample_var_1 if pt[0] == 1 else self._sample_var_2
        val = gp.sample_point_from_gp(pt, sample_var)
        data.append_sample_points([
            [pt, val, sample_var],
        ])
        gp = GaussianProcess(self._cov, data)
    return data
Example #3
argv = sys.argv[1:]
if argv[0].find("pes") < 0:
    raise ValueError("benchmark is not pes!")
problem = identify_problem(argv, bucket)

# Transform data to (0,1)^d space
lower_bounds = problem.obj_func_min._search_domain[:, 0]
upper_bounds = problem.obj_func_min._search_domain[:, 1]
transformed_data = HistoricalData(problem.obj_func_min.getDim() + 1)
for pt, val, var in zip(problem.hist_data.points_sampled,
                        problem.hist_data.points_sampled_value,
                        problem.hist_data.points_sampled_noise_variance):
    transformed_data.append_sample_points([
        [
            numpy.concatenate(
                ([pt[0]], scale_forward(pt[1:], lower_bounds, upper_bounds))),
            val, var
        ],
    ])
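
# For reference, a minimal sketch of the scaling assumed above: scale_forward
# is taken to map each coordinate affinely from [lower_bounds, upper_bounds]
# onto (0, 1). The actual helper used by this script may differ.
def _scale_forward_sketch(x, lower_bounds, upper_bounds):
    return (x - lower_bounds) / (upper_bounds - lower_bounds)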


# entropy search begins
def noise_func(IS, x):
    return problem.obj_func_min.noise_and_cost_func(IS, x)[0]


def cost_func(IS, x):
    return problem.obj_func_min.noise_and_cost_func(IS, x)[1]


print(problem.hyper_param)
Example #4
    [objective_func.evaluate_true(pt) for pt in init_pts]
)  # [:, observations]

# Collecting Data
s_suggest = np.array(init_pts)
f_s_suggest = np.array(init_pts_value).reshape(initial_n, 1)
s_recommend = np.array(init_pts)
f_s_recommend = np.array(true_value_init).reshape(initial_n, 1)
elapsed = np.zeros([1, num_iteration + initial_n])

init_data = HistoricalData(dim=objective_func._dim, num_derivatives=len(derivatives))
init_data.append_sample_points(
    [
        SamplePoint(
            pt,
            [init_pts_value[num, i] for i in observations],
            objective_func._sample_var,
        )
        for num, pt in enumerate(init_pts)
    ]
)

# initialize the model
prior = DefaultPrior(1 + dim + len(observations), len(observations))

# noisy = False means the underlying function being optimized is noise-free
cpp_gp_loglikelihood = cppGaussianProcessLogLikelihoodMCMC(
    historical_data=init_data,
    derivatives=derivatives,
    prior=prior,
    chain_length=1000,
    burnin_steps=2000,
Example #5
def main():

    args = docopt(__doc__)

    # Parse arguments
    mesh = args['<mesh>']
    weights = np.load(args['<weightfile>'])
    init_centroid = np.genfromtxt(args['<init_centroid>'])
    coil = args['<coil>']
    output_file = args['<output_file>']
    cpus = int(args['--cpus'] or 8)
    tmpdir = args['--tmp-dir'] or os.getenv('TMPDIR') or "/tmp/"
    num_iters = int(args['--n-iters'] or 50)
    min_samps = int(args['--min-var-samps'] or 10)
    tol = float(args['--convergence'] or 0.001)
    history = args['--history']
    skip_convergence = args['--skip-convergence']
    options = args['--options']

    if options:
        with open(options, 'r') as f:
            opts = json.load(f)
        logging.info("Using custom options file {}".format(options))
        logging.info("{}".format('\''.join(
            [f"{k}:{v}" for k, v in opts.items()])))
    else:
        opts = {}

    logging.info('Using {} cpus'.format(cpus))

    f = FieldFunc(mesh_file=mesh,
                  initial_centroid=init_centroid,
                  tet_weights=weights,
                  coil=coil,
                  field_dir=tmpdir,
                  cpus=cpus,
                  **opts)

    # Make search domain
    search_domain = TensorProductDomain([
        ClosedInterval(f.bounds[0, 0], f.bounds[0, 1]),
        ClosedInterval(f.bounds[1, 0], f.bounds[1, 1]),
        ClosedInterval(0, 180)
    ])

    c_search_domain = cTensorProductDomain([
        ClosedInterval(f.bounds[0, 0], f.bounds[0, 1]),
        ClosedInterval(f.bounds[1, 0], f.bounds[1, 1]),
        ClosedInterval(0, 180)
    ])

    # Generate historical points
    prior = DefaultPrior(n_dims=3 + 2, num_noise=1)
    prior.tophat = TophatPrior(-2, 5)
    prior.ln_prior = NormalPrior(12.5, 1.6)
    hist_pts = cpus
    i = 0
    init_pts = search_domain.generate_uniform_random_points_in_domain(hist_pts)
    observations = -f.evaluate(init_pts)
    hist_data = HistoricalData(dim=3, num_derivatives=0)
    hist_data.append_sample_points(
        [SamplePoint(inp, o, 0.0) for o, inp in zip(observations, init_pts)])

    # Train GP model
    gp_ll = GaussianProcessLogLikelihoodMCMC(historical_data=hist_data,
                                             derivatives=[],
                                             prior=prior,
                                             chain_length=1000,
                                             burnin_steps=2000,
                                             n_hypers=2**4,
                                             noisy=False)
    gp_ll.train()

    # Initialize grad desc params
    sgd_params = cGDParams(num_multistarts=200,
                           max_num_steps=50,
                           max_num_restarts=5,
                           num_steps_averaged=4,
                           gamma=0.7,
                           pre_mult=1.0,
                           max_relative_change=0.5,
                           tolerance=1.0e-10)

    num_samples = int(cpus * 1.3)
    best_point_history = []

    # Sum of errors buffer
    var_buffer = deque(maxlen=min_samps)
    for i in np.arange(0, num_iters):

        # Optimize qEI and pick samples
        points_to_sample, ei = gen_sample_from_qei(gp_ll.models[0],
                                                   c_search_domain,
                                                   sgd_params=sgd_params,
                                                   num_samples=num_samples,
                                                   num_mc=2**10)

        # Collect observations
        sampled_points = -f.evaluate(points_to_sample)
        evidence = [
            SamplePoint(c, v, 0.0)
            for c, v in zip(points_to_sample, sampled_points)
        ]

        # Update model
        gp_ll.add_sampled_points(evidence)
        gp_ll.train()

        # Pull model and pull values
        gp = gp_ll.models[0]
        min_point = np.argmin(gp._points_sampled_value)
        min_val = np.min(gp._points_sampled_value)
        best_coord = gp.get_historical_data_copy().points_sampled[min_point]

        logging.info('Iteration {} of {}'.format(i, num_iters))
        logging.info('Recommended Points:')
        logging.info(points_to_sample)
        logging.info('Expected Improvement: {}'.format(ei))
        logging.info('Current Best:')
        logging.info(f'f(x*)= {min_val}')
        logging.info(f'Coord: {best_coord}')
        best_point_history.append(str(min_val))

        if history:
            with open(history, 'w') as buf:
                buf.write('\n'.join(best_point_history))

        # Convergence check
        if (len(var_buffer) == var_buffer.maxlen) and not skip_convergence:
            deviation = sum([abs(x - min_val) for x in var_buffer])
            if deviation < tol:
                logging.info('Convergence reached!')
                logging.info('Deviation: {}'.format(deviation))
                logging.info('History length: {}'.format(var_buffer.maxlen))
                logging.info('Tolerance: {}'.format(tol))
                break

        var_buffer.append(min_val)

    # Save position and orientation matrix
    np.savetxt(output_file, best_coord)
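
# Assumed entry point: main() parses docopt(__doc__) above, so the script is
# presumably run directly. The guard below is a standard sketch, not part of
# the original example.
if __name__ == '__main__':
    main()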
Example #6
result = numpy.zeros((num_iteration, 6))
best_so_far_kg = numpy.zeros((end_idx - start_idx, num_iteration + 1))

# begin job
for job_no in range(start_idx, end_idx):
    python_search_domain = pythonTensorProductDomain([
        ClosedInterval(bound[0], bound[1])
        for bound in objective_func._search_domain
    ])

    init_value = [objective_func.evaluate(pt) for pt in init_pts]

    init_data = HistoricalData(objective_func._dim, 1)
    init_data.append_sample_points([
        SamplePoint(pt, init_value[num], objective_func._sample_var)
        for num, pt in enumerate(init_pts)
    ])

    #best_so_far_kg[job_no-start_idx, 0] = objective_func.evaluate_true(init_data.points_sampled[numpy.argmin(init_data.points_sampled_value)])
    print "best so far {0}".format(best_so_far_kg[job_no - start_idx, 0])

    init_data_nogradient = HistoricalData(objective_func._dim, 0)
    init_data_nogradient.append_sample_points([
        SamplePoint(pt, init_value[num][0], objective_func._sample_var)
        for num, pt in enumerate(init_pts)
    ])

    cpp_cov_nograd = cppSquareExponential(hyper_params)
    cpp_gp_nogradient = cppGaussianProcess(cpp_cov_nograd,
                                           numpy.ones(1) * 0.0001,
                                           init_data_nogradient, [])
Example #7
def main():

    args = docopt(__doc__)

    #Parse arguments
    mesh = args['<mesh>']
    weights = np.load(args['<weightfile>'])
    C = np.load(args['<quad_const>'])
    b = np.load(args['<bounds>'])
    R = np.load(args['<affine>'])
    coil = args['<coil>']
    loc_out = args['<loc_out>']
    rot_out = args['<rot_out>']
    cpus = int(args['--cpus'] or 8)
    tmpdir = args['--tmp-dir'] or os.getenv('TMPDIR') or "/tmp/"
    num_iters = int(args['--n-iters'] or 50)


    #Make search domain
    search_domain = TensorProductDomain([
            ClosedInterval(b[0,0],b[0,1]), #X coord on quadratic surface
            ClosedInterval(b[1,0],b[1,1]), #Y coord on quadratic surface
            ClosedInterval(0,180) #Rotational angle
            ])

    c_search_domain = cTensorProductDomain([
            ClosedInterval(b[0,0],b[0,1]), 
            ClosedInterval(b[1,0],b[1,1]),
            ClosedInterval(0,180)
            ])

    #Make objective function
    f = FieldFunc(mesh_file=mesh, quad_surf_consts=C,
                  surf_to_mesh_matrix=R, tet_weights=weights,
                  field_dir=tmpdir, coil=coil, cpus=cpus)


    #Generate historical points
    hist_pts = int(cpus * 1.5)
    init_pts = search_domain.generate_uniform_random_points_in_domain(hist_pts)
    observations = -f.evaluate(init_pts)
    hist_data = HistoricalData(dim=3, num_derivatives=0)
    hist_data.append_sample_points([SamplePoint(inp, o, 0.0)
                                    for o, inp in
                                    zip(observations, init_pts)])

    #Set up model specifications
    prior = DefaultPrior(n_dims = 3 + 2, num_noise=1)
    gp_ll = GaussianProcessLogLikelihoodMCMC(historical_data=hist_data,
                                             derivatives=[], prior=prior,
                                             chain_length=1000, burnin_steps=2000,
                                             n_hypers=2**4, noisy=False)
    gp_ll.train()

    #Initialize grad desc params
    sgd_params = cGDParams(num_multistarts=200, max_num_steps=50,
                           max_num_restarts=2, num_steps_averaged=4,
                           gamma=0.7, pre_mult=1.0, max_relative_change=0.5,
                           tolerance=1.0e-10)

    num_samples = int(cpus*1.3)
    best_point_history = []
    for i in np.arange(0,num_iters):
            
        #Optimize qEI and pick samples
        points_to_sample, ei = gen_sample_from_qei(gp_ll.models[0],
                                                   c_search_domain, sgd_params=sgd_params,
                                                   num_samples=num_samples, num_mc=2**10)

        #Collect observations
        sampled_points = -f.evaluate(points_to_sample)
        evidence = [SamplePoint(c, v, 0.0)
                    for c, v in zip(points_to_sample, sampled_points)]

        #Update model
        gp_ll.add_sampled_points(evidence)
        gp_ll.train()

        #Pull model and pull values
        gp = gp_ll.models[0]
        min_point = np.argmin(gp._points_sampled_value)
        min_val = np.min(gp._points_sampled_value)
        best_coord = gp.get_historical_data_copy().points_sampled[min_point]

        print('Recommended Points:')
        print(points_to_sample)
        print('Expected Improvement: {}'.format(ei))
        print('Current Best:')
        print('f(x*)=',min_val)
        print('Coord:', best_coord)

        best_point_history.append(min_val)

    #Once sampling is done take the best point and transform it back into native space
    preaff_loc = geolib.map_param_2_surf(best_coord[0], best_coord[1], C)
    preaff_rot, _ = geolib.map_rot_2_surf(best_coord[0], best_coord[1],
                                          best_coord[2], C)
    loc = np.matmul(R, preaff_loc)
    rot = np.matmul(R, preaff_rot)
    np.savetxt(loc_out, loc)
    np.savetxt(rot_out, rot)