Example #1
def main():
    data = Sphere(dim=1)
    maxeval = 100

    kernel = CubicKernel
    tail = LinearTail

    for i in range(2):
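        # First pass (i == 0): noisy observations; second pass (i == 1): noise-free observations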

        rbf1 = RBFInterpolant(kernel=kernel, tail=tail, maxp=maxeval, eta=0)
        rbf2 = RBFInterpolant(kernel=kernel, tail=tail, maxp=maxeval, eta='adapt')

        X = np.random.uniform(data.xlow, data.xup, (maxeval, data.dim))
        fX = np.zeros((maxeval, data.dim))
        for j in range(maxeval):
            fX[j] = data.objfunction(X[j, :])
            if i == 0:
                fX[j] += 3*np.cos(10000*X[j, :])
            rbf1.add_point(X[j, :], fX[j])
            rbf2.add_point(X[j, :], fX[j])

        Xpred = np.atleast_2d(np.linspace(data.xlow, data.xup, 1000)).transpose()
        fXpred = np.zeros((1000, data.dim))
        for j in range(1000):
            fXpred[j] = data.objfunction(Xpred[j, :])

        if i == 0:
            print("\nL2 error interpolation with noise: {0}".format(np.linalg.norm(rbf1.evals(Xpred) - fXpred)))
            print("L2 error regularization with noise: {0}".format(np.linalg.norm(rbf2.evals(Xpred) - fXpred)))
        else:
            print("L2 error interpolation without noise: {0}".format(np.linalg.norm(rbf1.evals(Xpred) - fXpred)))
            print("L2 error regularization without noise: {0}\n".format(np.linalg.norm(rbf2.evals(Xpred) - fXpred)))

        """
Exemple #2
0
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_matlab_engine.log"):
        os.remove("./logfiles/test_matlab_engine.log")
    logging.basicConfig(filename="./logfiles/test_matlab_engine.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 500")
    print("Sampling method: CandidateDYCORS")
    print("Experimental design: Latin Hypercube")
    print("Surrogate: Cubic RBF")

    nthreads = 4
    maxeval = 500

    data = Ackley(dim=10)
    print(data.info)

    # Use the threaded controller (one strategy, several worker threads)
    controller = ThreadController()
    controller.strategy = \
        SyncStrategyNoConstraints(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nthreads,
            exp_design=LatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail,
                                            maxp=maxeval),
            sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim))

    print("\nNOTE: You may need to specify the matlab_root keyword in "
          "order \n      to start a MATLAB  session using the matlab_wrapper "
          "module\n")

    # We need to tell MATLAB where the script is
    mfile_location = os.getcwd()

    # Launch the threads
    for _ in range(nthreads):
        worker = MatlabWorker(controller)
        try:
            worker.matlab = matlab_wrapper.MatlabSession(options='-nojvm')
        except Exception as err:
            print("\nERROR: Failed to initialize a MATLAB session: {0}\n".format(err))
            exit()

        worker.matlab.workspace.addpath(mfile_location)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    # Print the final result
    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #3
def solve_opf_dycors_parallel(problem: AcOPFBlackBox,
                              maxeval=1000,
                              nthreads=4,
                              verbose=False):
    """

    :param problem:
    :return:
    """

    print(problem.info)

    # (2) Experimental design
    # Use a symmetric Latin hypercube with 2d + 1 samples
    exp_des = SymmetricLatinHypercube(dim=problem.dim,
                                      npts=2 * problem.dim + 1)

    # (3) Surrogate model
    # Use a cubic RBF interpolant with a linear tail
    surrogate = RBFInterpolant(kernel=CubicKernel,
                               tail=LinearTail,
                               maxp=maxeval)

    # (4) Adaptive sampling
    # Use DYCORS with 100d candidate points
    adapt_samp = CandidateDYCORS(data=problem, numcand=100 * problem.dim)

    # Use the threaded controller
    controller = ThreadController()

    # (5) Use the synchronous strategy without non-bound constraints
    # Use nthreads worker threads and allow nthreads simultaneous evaluations

    strategy = SyncStrategyNoConstraints(worker_id=0,
                                         data=problem,
                                         maxeval=maxeval,
                                         nsamples=nthreads,
                                         exp_design=exp_des,
                                         response_surface=surrogate,
                                         sampling_method=adapt_samp)
    controller.strategy = strategy

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, problem.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    # Print the final result
    if verbose:
        print('Best value found: {0}'.format(result.value))
        print('Best solution found: {0}'.format(
            np.array_str(result.params[0],
                         max_line_width=np.inf,
                         precision=5,
                         suppress_small=True)))

    return result.value
Example #4
def optimization_trial(pname, data, epsilon, nthreads, maxeval, num):
    nsamples = nthreads
    print("Trial Number:" + str(num))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = \
        MoSyncStrategyNoConstraints(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nsamples,
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail,
                                            maxp=maxeval),
            sampling_method=EvolutionaryAlgorithm(data, epsilons=epsilon, cand_flag=1),
            archiving_method=EpsilonArchive(size_max=200, epsilon=epsilon))

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    def merit(r):
        return r.value[0]
    result = controller.run(merit=merit)

    # Save the results to file
    X = np.loadtxt('final.txt')
    controller.strategy.save_plot(num)
    fname = pname + '_' + str(data.dim) + '_EGOMORS_' + str(maxeval) + '_' + str(num) + '_' + str(nthreads) + '.txt'
    np.savetxt(fname, X)
Example #5
def solve_opf_dycors_serial(problem: AcOPFBlackBox,
                            maxeval=1000,
                            verbose=False,
                            stop_at=False,
                            stop_value=0):
    """

    :param problem:
    :param maxeval:
    :param verbose:
    :param stop_at:
    :param stop_value:
    :return:
    """

    print(problem.info)

    # (2) Experimental design
    # Use a symmetric Latin hypercube with 2d + 1 samples
    exp_des = SymmetricLatinHypercube(dim=problem.dim,
                                      npts=2 * problem.dim + 1)

    # (3) Surrogate model
    # Use a cubic RBF interpolant with a linear tail
    surrogate = RBFInterpolant(kernel=CubicKernel,
                               tail=LinearTail,
                               maxp=maxeval)

    # (4) Adaptive sampling
    # Use DYCORS with 100d candidate points
    adapt_samp = CandidateDYCORS(data=problem, numcand=100 * problem.dim)

    # Use the serial controller (uses only one thread)
    controller = SerialController(problem.objfunction)

    # (5) Use the synchronous strategy without non-bound constraints
    strategy = SyncStrategyNoConstraints(worker_id=0,
                                         data=problem,
                                         maxeval=maxeval,
                                         nsamples=1,
                                         exp_design=exp_des,
                                         response_surface=surrogate,
                                         sampling_method=adapt_samp)
    controller.strategy = strategy

    # Run the optimization strategy
    result = controller.run(stop_at=stop_at, stop_value=stop_value)

    # Print the final result
    if verbose:
        print('Best value found: {0}'.format(result.value))
        print('Best solution found: {0}'.format(
            np.array_str(result.params[0],
                         max_line_width=np.inf,
                         precision=5,
                         suppress_small=True)))

    # Return the best value found and the corresponding solution x
    return result.value, result.params[0]
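A usage sketch for the function above. These examples never show how the AcOPFBlackBox instance is built, so the problem object below is assumed to exist already, and the keyword values are illustrative.

# Hypothetical call, assuming `problem` is an already-constructed AcOPFBlackBox
best_value, best_x = solve_opf_dycors_serial(problem, maxeval=500, verbose=True)
print(best_value, best_x)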
Example #6
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_penalty.log"):
        os.remove("./logfiles/test_penalty.log")
    logging.basicConfig(filename="./logfiles/test_penalty.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 500")
    print("Sampling method: CandidateDYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    nthreads = 4
    maxeval = 500
    penalty = 1e6
    nsamples = nthreads

    data = Keane(dim=10)
    print(data.info)

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = \
        SyncStrategyPenalty(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nsamples,
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail, maxp=maxeval),
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim),
            penalty=penalty)

    # Launch the threads
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Use a penalty-based merit function
    def feasible_merit(record):
        xx = np.zeros((1, record.params[0].shape[0]))
        xx[0, :] = record.params[0]
        return record.value + controller.strategy.penalty_fun(xx)[0, 0]

    result = controller.run(merit=feasible_merit)
    best, xbest = result.value, result.params[0]

    print('Best value: {0}'.format(best))
    print('Best solution: {0}'.format(
        np.array_str(xbest,
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
    print('Feasible: {0}\n'.format(
        np.max(data.eval_ineq_constraints(xbest)) <= 0.0))
Example #7
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_projection.log"):
        os.remove("./logfiles/test_projection.log")
    logging.basicConfig(filename="./logfiles/test_projection.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 1000")
    print("Sampling method: CandidateDYCORS")
    print("Experimental design: Latin Hypercube")
    print("Surrogate: Cubic RBF")

    nthreads = 4
    maxeval = 1000
    nsamples = nthreads

    data = AckleyUnit(dim=10)
    print(data.info)

    def projection(x):
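        # Project x onto the unit sphere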
        return x / np.linalg.norm(x)

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = \
        SyncStrategyProjection(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nsamples,
            exp_design=LatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail, maxp=maxeval),
            sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim),
            proj_fun=projection
        )

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
    print('||x||_2 = {0}\n'.format(np.linalg.norm(result.params[0])))
Example #8
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_subprocess_files.log"):
        os.remove("./logfiles/test_subprocess_files.log")
    logging.basicConfig(filename="./logfiles/test_subprocess_files.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 200")
    print("Sampling method: Candidate DYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    assert os.path.isfile("./sphere_ext_files"), "You need to build sphere_ext"

    nthreads = 4
    maxeval = 200
    nsamples = nthreads

    data = Sphere(dim=10)
    print(data.info)

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = \
        SyncStrategyNoConstraints(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nsamples,
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim),
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail,
                                            maxp=maxeval))

    # Launch the threads and give them access to the objective function
    for i in range(nthreads):
        worker = CppSim(controller)
        worker.my_filename = str(i) + ".txt"
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #9
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_simple_time.log"):
        os.remove("./logfiles/test_simple_time.log")
    logging.basicConfig(filename="./logfiles/test_simple_time.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Time budget: 30 seconds")
    print("Sampling method: CandidateDYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    nthreads = 4
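    # A negative maxeval is interpreted as a time budget in seconds (here: 30 seconds)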
    maxeval = -30
    nsamples = nthreads

    data = Ackley(dim=10)
    print(data.info)

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = \
        SyncStrategyNoConstraints(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nsamples,
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail,
                                            maxp=1000),
            sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim))

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    start_time = time.time()
    result = controller.run()
    end_time = time.time()

    print('Run time: {0} seconds'.format(end_time - start_time))
    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
Example #10
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_multisampling.log"):
        os.remove("./logfiles/test_multisampling.log")
    logging.basicConfig(filename="./logfiles/test_multisampling.log",
                        level=logging.INFO)

    print("\nNumber of threads: 1")
    print("Maximum number of evaluations: 500")
    print(
        "Sampling method: CandidateDYCORS, Genetic Algorithm, Multi-Start Gradient"
    )
    print("Experimental design: Latin Hypercube")
    print("Surrogate: Cubic RBF")

    nthreads = 1
    maxeval = 500
    nsamples = nthreads

    data = Ackley(dim=10)
    print(data.info)

    # Create a strategy and a controller
    sampling_method = [
        CandidateDYCORS(data=data, numcand=100 * data.dim),
        GeneticAlgorithm(data=data),
        MultiStartGradient(data=data)
    ]
    controller = SerialController(data.objfunction)
    controller.strategy = \
        SyncStrategyNoConstraints(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nsamples,
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail, maxp=maxeval),
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim + 1)),
            sampling_method=MultiSampling(sampling_method, [0, 1, 0, 2]))

    result = controller.run()
    best, xbest = result.value, result.params[0]

    print('Best value: {0}'.format(best))
    print('Best solution: {0}\n'.format(
        np.array_str(xbest,
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
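In the call above, the second argument to MultiSampling is the cycle of indices into the sampler list, so [0, 1, 0, 2] repeatedly uses DYCORS, the genetic algorithm, DYCORS again, and the multi-start gradient sampler. As an illustrative sketch (not part of the original example), a strict alternation between the first two samplers would use the same constructors with a two-element cycle:

# Sketch: cycle strictly between DYCORS (index 0) and the genetic algorithm (index 1)
two_samplers = [CandidateDYCORS(data=data, numcand=100 * data.dim),
                GeneticAlgorithm(data=data)]
alternating = MultiSampling(two_samplers, [0, 1])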
Example #11
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_simple.log"):
        os.remove("./logfiles/test_simple.log")
    logging.basicConfig(filename="./logfiles/test_simple.log",
                        level=logging.INFO)

    nthreads = 4
    maxeval = 100
    nsamples = nthreads

    print("\nNumber of threads: " + str(nthreads))
    print("Maximum number of evaluations: " + str(maxeval))
    print("Sampling method: Mixed")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    #data = LZF3()
    data = DTLZ4(nobj=2)
    num = 1
    epsilons = [0.05, 0.05]
    # Create a strategy and a controller
    controller = ThreadController()
    #controller = SerialController(data.objfunction)
    controller.strategy = \
        MoSyncStrategyNoConstraints(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nsamples,
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail,
                                            maxp=maxeval),
            sampling_method=EvolutionaryAlgorithm(data, epsilons=epsilons, cand_flag=1),
            archiving_method=EpsilonArchive(size_max=200, epsilon=epsilons))

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    def merit(r):
        return r.value[0]

    result = controller.run(merit=merit)

    controller.strategy.save_plot(num)
Example #12
def main_master(nworkers):
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_subprocess_mpi.log"):
        os.remove("./logfiles/test_subprocess_mpi.log")
    logging.basicConfig(filename="./logfiles/test_subprocess_mpi.log",
                        level=logging.INFO)

    print(
        "\nTesting the POAP MPI controller with {0} workers".format(nworkers))
    print("Maximum number of evaluations: 200")
    print("Search strategy: Candidate DYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    assert os.path.isfile("./sphere_ext"), "You need to build sphere_ext"

    maxeval = 200

    data = Sphere(dim=10)
    print(data.info)

    # Create a strategy and a controller
    strategy = \
        SyncStrategyNoConstraints(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nworkers,
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim),
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail,
                                            maxp=maxeval))

    controller = MPIController(strategy)

    # Run the optimization strategy
    result = controller.run()
    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #13
    def deriv(self, x, d):
        """Evaluate the derivative of the ensemble surrogate at x."""
        if self.weights is None:
            self.compute_weights()

        val = 0.0
        for i in range(self.M):
            val += self.weights[i] * self.model_list[i].deriv(x, d)
        return val


if __name__ == "__main__":

    from pySOT import RBFInterpolant
    from pySOT import CubicRBFSurface, TPSSurface, LinearRBFSurface

    fhat1 = RBFInterpolant(CubicRBFSurface, 1e-8, 100)
    fhat2 = RBFInterpolant(TPSSurface, 1e-8, 100)
    fhat3 = RBFInterpolant(LinearRBFSurface, 1e-8, 100)

    models = [fhat1, fhat2, fhat3]
    fhat = EnsembleSurrogate(models, 10)

    def test_f(x):
        """Test function"""
        fx = x[1] * np.sin(x[0]) + x[0] * np.cos(x[1])
        return fx

    def test_df(x):
        """Derivative of test function"""
        dfx = np.array([
            x[1] * np.cos(x[0]) + np.cos(x[1]),
            np.sin(x[0]) - x[0] * np.sin(x[1])])
        return dfx
        """
        if self.weights is None:
            self.compute_weights()

        val = 0.0
        for i in range(self.M):
            val += self.weights[i] * self.model_list[i].deriv(x, d)
        return val


if __name__ == "__main__":

    from pySOT import RBFInterpolant
    from pySOT import CubicKernel, TPSKernel, LinearKernel, LinearTail, ConstantTail

    fhat1 = RBFInterpolant(CubicKernel, LinearTail, 100, 1e-8)
    fhat2 = RBFInterpolant(TPSKernel, LinearTail, 100, 1e-8)
    fhat3 = RBFInterpolant(LinearKernel, ConstantTail, 100, 1e-8)

    models = [fhat1, fhat2, fhat3]
    fhat = EnsembleSurrogate(models, 10)

    def test_f(x):
        """Test function"""
        fx = x[1] * np.sin(x[0]) + x[0] * np.cos(x[1])
        return fx

    def test_df(x):
        """Derivative of test function"""
        dfx = np.array([
            x[1] * np.cos(x[0]) + np.cos(x[1]),
            np.sin(x[0]) - x[0] * np.sin(x[1])])
        return dfx
Example #15
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_ensemble.log"):
        os.remove("./logfiles/test_ensemble.log")
    logging.basicConfig(filename="./logfiles/test_ensemble.log",
                        level=logging.INFO)

    print("\nNumber of threads: 5")
    print("Maximum number of evaluations: 250")
    print("Sampling method: CandidateSRBF")
    print("Experimental design: Symmetric Latin Hypercube + point [1,1,...,1]")
    print("Ensemble Surrogate: Cubic RBF, PolyReg")

    nthreads = 5
    maxeval = 250
    nsamples = nthreads

    data = Ackley(dim=5)
    print(data.info)

    # Use RBF + PolyReg
    bounds = np.vstack((data.xlow, data.xup)).T
    basisp = basis_TD(data.dim, 2)  # total-degree polynomial basis of order 2

    models = [
        RBFInterpolant(kernel=CubicKernel, tail=LinearTail, maxp=maxeval),
        PolyRegression(bounds, basisp)
    ]
    response_surface = EnsembleSurrogate(model_list=models, maxp=maxeval)

    # Add an extra point to the experimental design. If a good solution
    # is already known, it can be supplied to the strategy this way.
    extra = np.ones((1, data.dim))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = \
        SyncStrategyNoConstraints(
            worker_id=0, data=data,
            response_surface=response_surface,
            maxeval=maxeval, nsamples=nsamples,
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            sampling_method=CandidateSRBF(data=data, numcand=100*data.dim),
            extra=extra)

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    response_surface.compute_weights()
    print('Final weights: {0}'.format(
        np.array_str(response_surface.weights,
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #16
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_extra_vals.log"):
        os.remove("./logfiles/test_extra_vals.log")
    logging.basicConfig(filename="./logfiles/test_extra_vals.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 500")
    print("Sampling method: CandidateDYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    nthreads = 4
    maxeval = 500
    nsamples = nthreads

    data = Ackley(dim=10)
    print(data.info)

    nextra = 10
    extra = np.random.uniform(data.xlow, data.xup, (nextra, data.dim))
    extra_vals = np.nan * np.ones((nextra, 1))
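    # NaN entries are points with no known value; the strategy evaluates those itself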
    for i in range(nextra):  # Evaluate every second point
        if i % 2 == 0:
            extra_vals[i] = data.objfunction(extra[i, :])

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = \
        SyncStrategyNoConstraints(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nsamples,
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail,
                                            maxp=maxeval),
            sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim),
            extra=extra, extra_vals=extra_vals)

    # Append the known function values to the POAP database since POAP won't evaluate these points
    for i in range(len(extra_vals)):
        if not np.isnan(extra_vals[i]):
            record = EvalRecord(params=(np.ravel(extra[i, :]), ),
                                status='completed')
            record.value = extra_vals[i]
            record.feasible = True
            controller.fevals.append(record)

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))