Example 1
elapsed = np.zeros([1, num_iteration + initial_n])

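# Pack the initial design points and their observed values (objective value plus
# any requested derivative observations) into a HistoricalData container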
init_data = HistoricalData(dim=objective_func._dim, num_derivatives=len(derivatives))
init_data.append_sample_points(
    [
        SamplePoint(
            pt,
            [init_pts_value[num, i] for i in observations],
            objective_func._sample_var,
        )
        for num, pt in enumerate(init_pts)
    ]
)

# initialize the model
prior = DefaultPrior(1 + dim + len(observations), len(observations))

# noisy = False means the underlying function being optimized is noise-free
cpp_gp_loglikelihood = cppGaussianProcessLogLikelihoodMCMC(
    historical_data=init_data,
    derivatives=derivatives,
    prior=prior,
    chain_length=1000,
    burnin_steps=2000,
    n_hypers=2 ** 4,
    noisy=noisy,
)
cpp_gp_loglikelihood.train()

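# Parameters for the Python gradient-descent optimizer used later in the run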
py_sgd_params_ps = pyGradientDescentParameters(
    max_num_steps=1000,
Example 2
def main():

    args = docopt(__doc__)

    #Parse arguments
    mesh        =   args['<mesh>']
    weights     =   np.load(args['<weightfile>'])
    C           =   np.load(args['<quad_const>'])
    b           =   np.load(args['<bounds>'])
    R           =   np.load(args['<affine>'])
    coil        =   args['<coil>']
    loc_out     =   args['<loc_out>']
    rot_out     =   args['<rot_out>']
    cpus        =   int(args['--cpus'] or 8)
    tmpdir      =   args['--tmp-dir'] or os.getenv('TMPDIR') or "/tmp/"
    num_iters   =   int(args['--n-iters'] or 50)


    #Make search domain
    search_domain = TensorProductDomain([
            ClosedInterval(b[0,0],b[0,1]), #X coord on quadratic surface
            ClosedInterval(b[1,0],b[1,1]), #Y coord on quadratic surface
            ClosedInterval(0,180) #Rotational angle
            ])

    c_search_domain = cTensorProductDomain([
            ClosedInterval(b[0,0],b[0,1]), 
            ClosedInterval(b[1,0],b[1,1]),
            ClosedInterval(0,180)
            ])

    #Make objective function
    f = FieldFunc(mesh_file=mesh, quad_surf_consts=C,
                  surf_to_mesh_matrix=R, tet_weights=weights,
                  field_dir=tmpdir, coil=coil, cpus=cpus)


    #Generate historical points
    hist_pts = int(cpus * 1.5)
    init_pts = search_domain.generate_uniform_random_points_in_domain(hist_pts)
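    #Negate the objective: the EI machinery minimizes, so maximizing the field
    #objective is framed as minimizing -f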
    observations = -f.evaluate(init_pts)
    hist_data = HistoricalData(dim = 3, num_derivatives= 0)
    hist_data.append_sample_points([SamplePoint(inp,o,0.0) 
                                    for o,inp in 
                                    zip(observations,init_pts)])

    #Set up model specifications
    prior = DefaultPrior(n_dims = 3 + 2, num_noise=1)
    gp_ll = GaussianProcessLogLikelihoodMCMC(historical_data=hist_data,
                                             derivatives=[], prior=prior,
                                             chain_length=1000, burnin_steps=2000,
                                             n_hypers=2**4, noisy=False)
    gp_ll.train()

    #Initialize grad desc params
    sgd_params = cGDParams(num_multistarts=200, max_num_steps=50,
                           max_num_restarts=2, num_steps_averaged=4,
                           gamma=0.7, pre_mult=1.0, max_relative_change=0.5,
                           tolerance=1.0e-10)

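    #Number of candidate points (q) proposed by qEI on each iteration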
    num_samples = int(cpus*1.3)
    best_point_history = []
    for i in np.arange(0,num_iters):
            
        #Optimize qEI and pick samples
        points_to_sample, ei = gen_sample_from_qei(gp_ll.models[0],
                                                   c_search_domain, sgd_params=sgd_params,
                                                   num_samples=num_samples, num_mc=2**10)

        #Collect observations
        sampled_points = -f.evaluate(points_to_sample)
        evidence = [SamplePoint(c,v,0.0) for c,v in zip(points_to_sample, sampled_points)]

        #Update model
        gp_ll.add_sampled_points(evidence)
        gp_ll.train()

        #Pull model and pull values
        gp = gp_ll.models[0]
        min_point = np.argmin(gp._points_sampled_value)
        min_val = np.min(gp._points_sampled_value)
        best_coord = gp.get_historical_data_copy().points_sampled[min_point]

        print('Recommended Points:')
        print(points_to_sample)
        print('Expected Improvement: {}'.format(ei))
        print('Current Best:')
        print('f(x*)=',min_val)
        print('Coord:', best_coord)

        best_point_history.append(min_val)

    #Once sampling is done take the best point and transform it back into native space
    preaff_loc = geolib.map_param_2_surf(best_coord[0],best_coord[1],C)
    preaff_rot,_ = geolib.map_rot_2_surf(best_coord[0],best_coord[1],best_coord[2],C)
    loc = np.matmul(R,preaff_loc)
    rot = np.matmul(R,preaff_rot)
    np.savetxt(loc_out,loc)
    np.savetxt(rot_out,rot)
Example 3
def main():

    args = docopt(__doc__)

    # Parse arguments
    mesh = args['<mesh>']
    weights = np.load(args['<weightfile>'])
    init_centroid = np.genfromtxt(args['<init_centroid>'])
    coil = args['<coil>']
    output_file = args['<output_file>']
    cpus = int(args['--cpus'] or 8)
    tmpdir = args['--tmp-dir'] or os.getenv('TMPDIR') or "/tmp/"
    num_iters = int(args['--n-iters'] or 50)
    min_samps = int(args['--min-var-samps'] or 10)
    tol = float(args['--convergence'] or 0.001)
    history = args['--history']
    skip_convergence = args['--skip-convergence']
    options = args['--options']

    if options:
        with open(options, 'r') as f:
            opts = json.load(f)
        logging.info("Using custom options file {}".format(options))
        logging.info("{}".format('\''.join(
            [f"{k}:{v}" for k, v in opts.items()])))
    else:
        opts = {}

    logging.info('Using {} cpus'.format(cpus))

    f = FieldFunc(mesh_file=mesh,
                  initial_centroid=init_centroid,
                  tet_weights=weights,
                  coil=coil,
                  field_dir=tmpdir,
                  cpus=cpus,
                  **opts)

    # Make search domain
    search_domain = TensorProductDomain([
        ClosedInterval(f.bounds[0, 0], f.bounds[0, 1]),
        ClosedInterval(f.bounds[1, 0], f.bounds[1, 1]),
        ClosedInterval(0, 180)
    ])

    c_search_domain = cTensorProductDomain([
        ClosedInterval(f.bounds[0, 0], f.bounds[0, 1]),
        ClosedInterval(f.bounds[1, 0], f.bounds[1, 1]),
        ClosedInterval(0, 180)
    ])

    # Set up the hyperparameter prior
    prior = DefaultPrior(n_dims=3 + 2, num_noise=1)
    prior.tophat = TophatPrior(-2, 5)
    prior.ln_prior = NormalPrior(12.5, 1.6)

    # Generate historical points
    hist_pts = cpus
    i = 0
    init_pts = search_domain.generate_uniform_random_points_in_domain(hist_pts)
    observations = -f.evaluate(init_pts)
    hist_data = HistoricalData(dim=3, num_derivatives=0)
    hist_data.append_sample_points(
        [SamplePoint(inp, o, 0.0) for o, inp in zip(observations, init_pts)])

    # Train GP model
    gp_ll = GaussianProcessLogLikelihoodMCMC(historical_data=hist_data,
                                             derivatives=[],
                                             prior=prior,
                                             chain_length=1000,
                                             burnin_steps=2000,
                                             n_hypers=2**4,
                                             noisy=False)
    gp_ll.train()

    # Initialize grad desc params
    sgd_params = cGDParams(num_multistarts=200,
                           max_num_steps=50,
                           max_num_restarts=5,
                           num_steps_averaged=4,
                           gamma=0.7,
                           pre_mult=1.0,
                           max_relative_change=0.5,
                           tolerance=1.0e-10)

    num_samples = int(cpus * 1.3)
    best_point_history = []

    # Sum of errors buffer
    var_buffer = deque(maxlen=min_samps)
    for i in np.arange(0, num_iters):

        # Optimize qEI and pick samples
        points_to_sample, ei = gen_sample_from_qei(gp_ll.models[0],
                                                   c_search_domain,
                                                   sgd_params=sgd_params,
                                                   num_samples=num_samples,
                                                   num_mc=2**10)

        # Collect observations
        sampled_points = -f.evaluate(points_to_sample)
        evidence = [
            SamplePoint(c, v, 0.0)
            for c, v in zip(points_to_sample, sampled_points)
        ]

        # Update model
        gp_ll.add_sampled_points(evidence)
        gp_ll.train()

        # Pull model and pull values
        gp = gp_ll.models[0]
        min_point = np.argmin(gp._points_sampled_value)
        min_val = np.min(gp._points_sampled_value)
        best_coord = gp.get_historical_data_copy().points_sampled[min_point]

        logging.info('Iteration {} of {}'.format(i, num_iters))
        logging.info('Recommended Points:')
        logging.info(points_to_sample)
        logging.info('Expected Improvement: {}'.format(ei))
        logging.info('Current Best:')
        logging.info(f'f(x*)= {min_val}')
        logging.info(f'Coord: {best_coord}')
        best_point_history.append(str(min_val))

        if history:
            with open(history, 'w') as buf:
                buf.write('\n'.join(best_point_history))

        # Convergence check
        if (len(var_buffer) == var_buffer.maxlen) and not skip_convergence:
            deviation = sum([abs(x - min_val) for x in var_buffer])
            if deviation < tol:
                logging.info('Convergence reached!')
                logging.info('Deviation: {}'.format(deviation))
                logging.info('History length: {}'.format(var_buffer.maxlen))
                logging.info('Tolerance: {}'.format(tol))
                break

        var_buffer.append(min_val)

    # Save position and orientation matrix
    np.savetxt(output_file, best_coord)
Example 4
    # observe
    derivatives[i] = objective_func._observations
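    # column 0 is the function value; derivative observations follow, shifted by one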
    observations[i] = [0] + [j + 1 for j in derivatives[i]]
    init_pts_value[i] = np.array([objective_func.evaluate(pt) for pt in init_pts[i]])  # [:, observations]
    true_value_init[i] = np.array([objective_func.evaluate_true(pt) for pt in init_pts[i]])  # [:, observations]

    init_data[i] = HistoricalData(dim=objective_func._dim, num_derivatives=0)
    init_data[i].append_sample_points([SamplePoint(pt, [init_pts_value[i][num, j] for j in observations[i]],
                                                   objective_func._sample_var) for num, pt in enumerate(init_pts[i])])

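# one prior and one GP log-likelihood model per problem instance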
prior = [0, 0, 0, 0]
cpp_gp_loglikelihood = [0, 0, 0, 0]
for i in range(4):
    # initialize the model
    prior[i] = DefaultPrior(1 + dim[i] + len(observations[i]), len(observations[i]))

    # noisy = False means the underlying function being optimized is noise-free
    cpp_gp_loglikelihood[i] = cppGaussianProcessLogLikelihoodMCMC(historical_data=init_data[i],
                                                                  derivatives=derivatives[i],
                                                                  prior=prior[i],
                                                                  chain_length=1000,
                                                                  burnin_steps=2000,
                                                                  n_hypers=2 ** 4,
                                                                  noisy=False)
    cpp_gp_loglikelihood[i].train()

py_sgd_params_ps = pyGradientDescentParameters(max_num_steps=1000,
                                               max_num_restarts=3,
                                               num_steps_averaged=15,
                                               gamma=0.7,