def init():
    print("\nInitializing run...")

    rbf = RBFInterpolant(
        dim=ackley.dim, kernel=CubicKernel(),
        tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(
        dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=rbf, asynchronous=True, batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Wrap controller in checkpoint object
    controller = CheckpointController(controller, fname=fname)
    result = controller.run()
    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
Example #2
    def start(self, max_evals):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []
        self.max_evals = max_evals

        # Symmetric Latin hypercube design
        des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(
            dim=self.opt.dim,
            lb=self.opt.lb,
            ub=self.opt.ub,
            kernel=CubicKernel(),
            tail=LinearTail(self.opt.dim),
            eta=1e-4,
        )

        # Optimization strategy
        self.strategy = SRBFStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )
Example #3
def test_srbf_async():
    max_evals = 200
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True,
                                       batch_size=None)

    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)
    controller.run()

    check_strategy(controller)
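Example #4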
def init():
    print("\nInitializing run...")
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = SerialController(ackley.eval)
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True)

    print("Number of workers: 1")
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Wrap controller in checkpoint object
    controller = CheckpointController(controller, fname=fname)
    result = controller.run()
    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #5
    def __init__(self, worker_id, data, response_surface, maxeval, nsamples,
                 exp_design=None, sampling_method=None, extra=None, extra_vals=None):

        # Check stopping criterion
        self.start_time = time.time()
        if maxeval < 0:  # Time budget
            self.maxeval = np.inf
            self.time_budget = np.abs(maxeval)
        else:
            self.maxeval = maxeval
            self.time_budget = np.inf

        # Import problem information
        self.worker_id = worker_id
        self.data = data
        self.fhat = response_surface
        if self.fhat is None:
            self.fhat = RBFInterpolant(kernel=CubicKernel, tail=LinearTail, maxp=maxeval)
        self.fhat.reset()  # Just to be sure!

        self.nsamples = nsamples
        self.extra = extra
        self.extra_vals = extra_vals

        # Default to generate sampling points using Symmetric Latin Hypercube
        self.design = exp_design
        if self.design is None:
            if self.data.dim > 50:
                self.design = LatinHypercube(data.dim, data.dim+1)
            else:
                self.design = SymmetricLatinHypercube(data.dim, 2*(data.dim+1))

        self.xrange = np.asarray(data.xup - data.xlow)

        # algorithm parameters
        self.sigma_min = 0.005
        self.sigma_max = 0.2
        self.sigma_init = 0.2

        self.failtol = max(5, data.dim)
        self.succtol = 3

        self.numeval = 0
        self.status = 0
        self.sigma = 0
        self.resubmitter = RetryStrategy()
        self.xbest = None
        self.fbest = np.inf
        self.fbest_old = None

        # Set up search procedures and initialize
        self.sampling = sampling_method
        if self.sampling is None:
            self.sampling = CandidateDYCORS(data)

        self.check_input()

        # Start with first experimental design
        self.sample_initial()
Example #6
def test_slhd():
    for i in range(10, 12):  # To test even and odd
        slhd = SymmetricLatinHypercube(dim=3, num_pts=i)
        X = slhd.generate_points()
        assert isinstance(slhd, ExperimentalDesign)
        assert np.all(X.shape == (i, 3))
        assert slhd.num_pts == i
        assert slhd.dim == 3
Example #7
def example_extra_vals():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_extra_vals.log"):
        os.remove("./logfiles/example_extra_vals.log")
    logging.basicConfig(filename="./logfiles/example_extra_vals.log",
                        level=logging.INFO)

    num_threads = 4
    max_evals = 500

    ackley = Ackley(dim=10)
    num_extra = 10
    extra = np.random.uniform(ackley.lb, ackley.ub, (num_extra, ackley.dim))
    extra_vals = np.nan * np.ones((num_extra, 1))
    for i in range(num_extra):  # Evaluate every second point
        if i % 2 == 0:
            extra_vals[i] = ackley.eval(extra[i, :])

    rbf = RBFInterpolant(dim=ackley.dim, kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=rbf, asynchronous=True, batch_size=num_threads,
        extra_points=extra, extra_vals=extra_vals)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Append the known function values to the POAP database since
    # POAP won't evaluate these points
    for i in range(len(extra_vals)):
        if not np.isnan(extra_vals[i]):
            record = EvalRecord(
                params=(np.ravel(extra[i, :]),), status='completed')
            record.value = extra_vals[i]
            record.feasible = True
            controller.fevals.append(record)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
Example #8
def example_sop():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_simple.log"):
        os.remove("./logfiles/example_simple.log")
    logging.basicConfig(filename="./logfiles/example_simple.log",
                        level=logging.INFO)

    print("\nNumber of threads: 8")
    print("Maximum number of evaluations: 500")
    print("Sampling method: CandidateDYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    num_threads = 8
    max_evals = 500

    ackley = Ackley(dim=10)
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SOPStrategy(
        max_evals=max_evals,
        opt_prob=ackley,
        exp_design=slhd,
        surrogate=rbf,
        asynchronous=False,
        ncenters=num_threads,
        batch_size=num_threads,
    )

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #9
def example_matlab_engine():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_matlab_engine.log"):
        os.remove("./logfiles/example_matlab_engine.log")
    logging.basicConfig(filename="./logfiles/example_matlab_engine.log",
                        level=logging.INFO)

    num_threads = 4
    max_evals = 500

    ackley = Ackley(dim=10)
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a threaded controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads
    for _ in range(num_threads):
        try:
            worker = MatlabWorker(controller)
            worker.matlab = matlab.engine.start_matlab()
            controller.launch_worker(worker)
        except Exception as e:
            print("\nERROR: Failed to initialize a MATLAB session.\n")
            print(str(e))
            return

    # Run the optimization strategy
    result = controller.run()

    # Print the final result
    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #10
def example_subprocess_files():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_subprocess_files.log"):
        os.remove("./logfiles/example_subprocess_files.log")
    logging.basicConfig(filename="./logfiles/example_subprocess_files.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 200")
    print("Sampling method: Candidate DYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    assert os.path.isfile(path), "You need to build sphere_ext_files"

    num_threads = 4
    max_evals = 200

    sphere = Sphere(dim=10)
    rbf = RBFInterpolant(dim=sphere.dim,
                         kernel=TPSKernel(),
                         tail=LinearTail(sphere.dim))
    slhd = SymmetricLatinHypercube(dim=sphere.dim,
                                   num_pts=2 * (sphere.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=sphere,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=False,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for i in range(num_threads):
        worker = CppSim(controller)
        worker.my_filename = str(i) + ".txt"
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #11
def test_slhd_round():
    num_pts = 10
    dim = 3
    lb = np.array([1, 2, 3])
    ub = np.array([3, 4, 5])
    int_var = np.array([1])

    np.random.seed(0)
    slhd = SymmetricLatinHypercube(dim=dim, num_pts=num_pts)
    X = slhd.generate_points(lb=lb, ub=ub, int_var=int_var)
    assert np.all(np.round(X[:, 1]) == X[:, 1])  # Should be integers
    assert np.all(np.max(X, axis=0) == ub)
    assert np.all(np.min(X, axis=0) == lb)
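Example #12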
def example_subprocess_partial_info():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_subprocess_partial_info.log"):
        os.remove("./logfiles/example_subprocess_partial_info.log")
    logging.basicConfig(
        filename="./logfiles/example_subprocess_partial_info.log",
        level=logging.INFO)

    assert os.path.isfile(path), "You need to build sumfun_ext"

    num_threads = 4
    max_evals = 200

    sumfun = SumfunExt(dim=10)
    rbf = RBFInterpolant(dim=sumfun.dim,
                         lb=sumfun.lb,
                         ub=sumfun.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(sumfun.dim))
    slhd = SymmetricLatinHypercube(dim=sumfun.dim,
                                   num_pts=2 * (sumfun.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=sumfun,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        controller.launch_worker(CppSim(controller))

    # Run the optimization strategy
    result = controller.run()

    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #13
def test_lcb_serial():
    max_evals = 50
    gp = GPRegressor(dim=ackley.dim)
    slhd = SymmetricLatinHypercube(
        dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = SerialController(ackley.eval)
    controller.strategy = LCBStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=gp, asynchronous=True)
    controller.run()

    check_strategy(controller)
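The tests above rely on a check_strategy helper that is not part of this listing. A minimal sketch of what such a helper could verify is given below; the assertions are assumptions, and the real test-suite helper may check more (or different) properties.

def check_strategy(controller):
    """Sanity checks on a finished controller run (sketch)."""
    strategy = controller.strategy
    # Every completed evaluation should carry a finite objective value
    for record in controller.fevals:
        if record.status == "completed":
            assert np.isfinite(record.value)
    # The strategy must not exceed its evaluation budget
    assert strategy.num_evals <= strategy.max_evals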
Example #14
def main_master(num_workers):
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_subprocess_mpi.log"):
        os.remove("./logfiles/test_subprocess_mpi.log")
    logging.basicConfig(filename="./logfiles/test_subprocess_mpi.log",
                        level=logging.INFO)

    print("\nTesting the POAP MPI controller with {0} workers".format(
        num_workers))
    print("Maximum number of evaluations: 200")
    print("Search strategy: Candidate DYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    assert os.path.isfile(path), "You need to build sphere_ext"

    max_evals = 200

    sphere = Sphere(dim=10)
    rbf = RBFInterpolant(dim=sphere.dim,
                         kernel=CubicKernel(),
                         tail=LinearTail(sphere.dim))
    slhd = SymmetricLatinHypercube(dim=sphere.dim,
                                   num_pts=2 * (sphere.dim + 1))

    # Create a strategy and a controller
    strategy = SRBFStrategy(max_evals=max_evals,
                            opt_prob=sphere,
                            exp_design=slhd,
                            surrogate=rbf,
                            asynchronous=True,
                            batch_size=num_workers)
    controller = MPIController(strategy)

    print("Number of threads: {}".format(num_workers))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Run the optimization strategy
    result = controller.run()
    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #15
def example_mars():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_mars.log"):
        os.remove("./logfiles/example_mars.log")
    logging.basicConfig(filename="./logfiles/example_mars.log",
                        level=logging.INFO)

    num_threads = 4
    max_evals = 200

    ackley = Ackley(dim=5)
    try:
        mars = MARSInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub)
    except Exception as e:
        print(str(e))
        return
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=mars,
                                       asynchronous=True,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(mars.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #16
def test_srbf_serial():
    max_evals = 200
    rbf = RBFInterpolant(
        dim=ackley.dim, kernel=CubicKernel(),
        tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(
        dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = SerialController(ackley.eval)
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=rbf, asynchronous=True)
    controller.run()

    check_strategy(controller)
Example #17
def init_serial():
    rbf = RBFInterpolant(dim=ackley.dim,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = SerialController(ackley.eval)
    controller.strategy = DYCORSStrategy(max_evals=max_evals,
                                         opt_prob=ackley,
                                         exp_design=slhd,
                                         surrogate=rbf,
                                         asynchronous=True)

    # Wrap controller in checkpoint object
    controller = CheckpointController(controller, fname=fname)
    controller.run()
Example #18
def test_lcb_async():
    max_evals = 50
    gp = GPRegressor(dim=ackley.dim)
    slhd = SymmetricLatinHypercube(
        dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = LCBStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=gp, asynchronous=True, batch_size=None)

    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)
    controller.run()

    check_strategy(controller)
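Example #19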
def example_lower_confidence_bounds():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_lower_confidence_bounds.log"):
        os.remove("./logfiles/example_lower_confidence_bounds.log")
    logging.basicConfig(
        filename="./logfiles/example_lower_confidence_bounds.log",
        level=logging.INFO)

    num_threads = 4
    max_evals = 100

    hart6 = Hartman6()
    gp = GPRegressor(dim=hart6.dim)
    slhd = SymmetricLatinHypercube(dim=hart6.dim, num_pts=2 * (hart6.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = LCBStrategy(max_evals=max_evals,
                                      opt_prob=hart6,
                                      exp_design=slhd,
                                      surrogate=gp,
                                      asynchronous=True)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(gp.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, hart6.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #20
def test_example_simple():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_simple.log"):
        os.remove("./logfiles/example_simple.log")
    logging.basicConfig(filename="./logfiles/example_simple.log",
                        level=logging.INFO)

    num_threads = 2
    max_evals = 50

    ackley = Ackley(dim=10)
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #21
def main_master(opt_prob, num_workers):
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/mpiexample_mpi.log"):
        os.remove("./logfiles/mpiexample_mpi.log")
    logging.basicConfig(filename="./logfiles/mpiexample_mpi.log",
                        level=logging.INFO)

    max_evals = 500

    rbf = RBFInterpolant(dim=opt_prob.dim,
                         lb=opt_prob.lb,
                         ub=opt_prob.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(opt_prob.dim))
    slhd = SymmetricLatinHypercube(dim=opt_prob.dim,
                                   num_pts=2 * (opt_prob.dim + 1))

    # Create a strategy and a controller
    strategy = SRBFStrategy(
        max_evals=max_evals,
        opt_prob=opt_prob,
        exp_design=slhd,
        surrogate=rbf,
        asynchronous=True,
        batch_size=num_workers,
    )
    controller = MPIController(strategy)

    print("Number of workers: {}".format(num_workers))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    result = controller.run()
    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example #22
def pysot_cube(objective, scale, n_trials, n_dim, with_count=False):

    if False:
        if not os.path.exists("./logfiles"):
            os.makedirs("logfiles")
        if os.path.exists("./logfiles/example_simple.log"):
            os.remove("./logfiles/example_simple.log")
        logging.basicConfig(filename="./logfiles/example_simple.log",
                            level=logging.INFO)

    num_threads = 2
    max_evals = n_trials
    gp = GenericProblem(dim=n_dim, objective=objective, scale=scale)
    rbf = RBFInterpolant(dim=n_dim,
                         lb=np.array([-scale] * n_dim),
                         ub=np.array([scale] * n_dim),
                         kernel=CubicKernel(),
                         tail=LinearTail(n_dim))
    slhd = SymmetricLatinHypercube(dim=n_dim, num_pts=2 * (n_dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=gp,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, gp.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()
    return (result.value, gp.feval_count) if with_count else result.value
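pysot_cube depends on a GenericProblem wrapper that is not part of this listing. A minimal sketch is shown below, assuming it simply adapts an arbitrary callable to pySOT's OptimizationProblem interface on the cube [-scale, scale]^dim and counts evaluations via the feval_count attribute used above (the variant in Example #27 appears to use the unit cube instead).

from pysot.optimization_problems import OptimizationProblem

class GenericProblem(OptimizationProblem):
    """Wrap an arbitrary objective as a pySOT problem (sketch)."""

    def __init__(self, dim, objective, scale=1.0):
        self.dim = dim
        self.lb = -scale * np.ones(dim)
        self.ub = scale * np.ones(dim)
        self.int_var = np.array([])        # no integer variables
        self.cont_var = np.arange(dim)     # all variables continuous
        self.objective = objective
        self.feval_count = 0

    def eval(self, x):
        self.feval_count += 1
        return self.objective(x)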
Example #23
    def fit(self, X, y=None, **kwargs):
        """Run training with cross validation.

        :param X: training data
        :param **: parameters to be passed to GridSearchCV
        """

        # wrap for pySOT
        class Target(OptimizationProblem):
            def __init__(self, outer):
                self.outer = outer
                param_def = outer.param_def
                self.lb = np.array([param['lb'] for param in param_def])
                self.ub = np.array([param['ub'] for param in param_def])
                self.dim = len(param_def)
                self.int_var = np.array([
                    idx for idx, param in enumerate(param_def)
                    if param['integer']
                ])
                self.cont_var = np.array([
                    idx for idx, param in enumerate(param_def)
                    if idx not in self.int_var
                ])

            def eval_(self, x):
                print('Eval {0} ...'.format(x))
                param_def = self.outer.param_def
                outer = self.outer
                # prepare parameters grid for gridsearchcv
                param_grid = ({
                    param['name']:
                    [int(x[idx]) if param['integer'] else x[idx]]
                    for idx, param in enumerate(param_def)
                })
                # create gridsearchcv to evaluate the cv
                gs = GridSearchCV(outer.estimator,
                                  param_grid,
                                  refit=False,
                                  **outer.kwargs)
                # never refit during iteration, refit at the end
                gs.fit(X, y=y, **kwargs)
                gs_score = gs.best_score_
                # gridsearchcv score is better when greater
                if not outer.best_score_ or gs_score > outer.best_score_:
                    outer.best_score_ = gs_score
                    outer.best_params_ = gs.best_params_
                # also record history
                outer.params_history_.append(x)
                outer.score_history_.append(gs_score)
                print('Eval {0} => {1}'.format(x, gs_score))
                # pySOT score is the lower the better, so return the negated
                return -gs_score

        # pySOT routine
        # TODO: make this configurable
        target = Target(self)
        rbf = SurrogateUnitBox(RBFInterpolant(dim=target.dim,
                                              kernel=CubicKernel(),
                                              tail=LinearTail(target.dim)),
                               lb=target.lb,
                               ub=target.ub)
        slhd = SymmetricLatinHypercube(dim=target.dim,
                                       num_pts=2 * (target.dim + 1))

        # Create a strategy and a controller
        controller = SerialController(objective=target.eval_)
        controller.strategy = SRBFStrategy(max_evals=self.n_iter,
                                           batch_size=1,
                                           opt_prob=target,
                                           exp_design=slhd,
                                           surrogate=rbf,
                                           asynchronous=False)

        print('Maximum number of evaluations: {0}'.format(self.n_iter))
        print('Strategy: {0}'.format(controller.strategy.__class__.__name__))
        print('Experimental design: {0}'.format(slhd.__class__.__name__))
        print('Surrogate: {0}'.format(rbf.__class__.__name__))

        # Run the optimization strategy
        result = controller.run()

        print('Best value found: {0}'.format(result.value))
        print('Best solution found: {0}\n'.format(
            np.array_str(result.params[0],
                         max_line_width=np.inf,
                         precision=5,
                         suppress_small=True)))
Example #24
    def setup_backend(
        self,
        params,
        strategy="SRBF",
        surrogate="RBF",
        design=None,
    ):
        self.opt_problem = BBoptOptimizationProblem(params)

        design_kwargs = dict(dim=self.opt_problem.dim)
        _coconut_case_match_to_1 = design
        _coconut_case_match_check_1 = False
        if _coconut_case_match_to_1 is None:
            _coconut_case_match_check_1 = True
        if _coconut_case_match_check_1:
            self.exp_design = EmptyExperimentalDesign(**design_kwargs)
        if not _coconut_case_match_check_1:
            if _coconut_case_match_to_1 == "latin_hypercube":
                _coconut_case_match_check_1 = True
            if _coconut_case_match_check_1:
                self.exp_design = LatinHypercube(num_pts=2 *
                                                 (self.opt_problem.dim + 1),
                                                 **design_kwargs)
        if not _coconut_case_match_check_1:
            if _coconut_case_match_to_1 == "symmetric_latin_hypercube":
                _coconut_case_match_check_1 = True
            if _coconut_case_match_check_1:
                self.exp_design = SymmetricLatinHypercube(
                    num_pts=2 * (self.opt_problem.dim + 1), **design_kwargs)
        if not _coconut_case_match_check_1:
            if _coconut_case_match_to_1 == "two_factorial":
                _coconut_case_match_check_1 = True
            if _coconut_case_match_check_1:
                self.exp_design = TwoFactorial(**design_kwargs)
        if not _coconut_case_match_check_1:
            _coconut_match_set_name_design_cls = _coconut_sentinel
            _coconut_match_set_name_design_cls = _coconut_case_match_to_1
            _coconut_case_match_check_1 = True
            if _coconut_case_match_check_1:
                if _coconut_match_set_name_design_cls is not _coconut_sentinel:
                    design_cls = _coconut_case_match_to_1
            if _coconut_case_match_check_1 and not (callable(design_cls)):
                _coconut_case_match_check_1 = False
            if _coconut_case_match_check_1:
                self.exp_design = design_cls(**design_kwargs)
        if not _coconut_case_match_check_1:
            raise TypeError(
                "unknown experimental design {_coconut_format_0!r}".format(
                    _coconut_format_0=(design)))

        surrogate_kwargs = dict(dim=self.opt_problem.dim,
                                lb=self.opt_problem.lb,
                                ub=self.opt_problem.ub)
        _coconut_case_match_to_2 = surrogate
        _coconut_case_match_check_2 = False
        if _coconut_case_match_to_2 == "RBF":
            _coconut_case_match_check_2 = True
        if _coconut_case_match_check_2:
            self.surrogate = RBFInterpolant(
                kernel=LinearKernel() if design is None else CubicKernel(),
                tail=ConstantTail(self.opt_problem.dim)
                if design is None else LinearTail(self.opt_problem.dim),
                **surrogate_kwargs)
        if not _coconut_case_match_check_2:
            if _coconut_case_match_to_2 == "GP":
                _coconut_case_match_check_2 = True
            if _coconut_case_match_check_2:
                self.surrogate = GPRegressor(**surrogate_kwargs)
        if not _coconut_case_match_check_2:
            _coconut_match_set_name_surrogate_cls = _coconut_sentinel
            _coconut_match_set_name_surrogate_cls = _coconut_case_match_to_2
            _coconut_case_match_check_2 = True
            if _coconut_case_match_check_2:
                if _coconut_match_set_name_surrogate_cls is not _coconut_sentinel:
                    surrogate_cls = _coconut_case_match_to_2
            if _coconut_case_match_check_2 and not (callable(surrogate_cls)):
                _coconut_case_match_check_2 = False
            if _coconut_case_match_check_2:
                self.surrogate = surrogate_cls(**surrogate_kwargs)
        if not _coconut_case_match_check_2:
            raise TypeError("unknown surrogate {_coconut_format_0!r}".format(
                _coconut_format_0=(surrogate)))

        strategy_kwargs = dict(max_evals=sys.maxsize,
                               opt_prob=self.opt_problem,
                               exp_design=self.exp_design,
                               surrogate=self.surrogate,
                               asynchronous=True,
                               batch_size=1)
        _coconut_case_match_to_3 = strategy
        _coconut_case_match_check_3 = False
        if _coconut_case_match_to_3 == "SRBF":
            _coconut_case_match_check_3 = True
        if _coconut_case_match_check_3:
            self.strategy = SRBFStrategy(**strategy_kwargs)
        if not _coconut_case_match_check_3:
            if _coconut_case_match_to_3 == "EI":
                _coconut_case_match_check_3 = True
            if _coconut_case_match_check_3:
                self.strategy = EIStrategy(**strategy_kwargs)
        if not _coconut_case_match_check_3:
            if _coconut_case_match_to_3 == "DYCORS":
                _coconut_case_match_check_3 = True
            if _coconut_case_match_check_3:
                self.strategy = DYCORSStrategy(**strategy_kwargs)
        if not _coconut_case_match_check_3:
            if _coconut_case_match_to_3 == "LCB":
                _coconut_case_match_check_3 = True
            if _coconut_case_match_check_3:
                self.strategy = LCBStrategy(**strategy_kwargs)
        if not _coconut_case_match_check_3:
            _coconut_match_set_name_strategy_cls = _coconut_sentinel
            _coconut_match_set_name_strategy_cls = _coconut_case_match_to_3
            _coconut_case_match_check_3 = True
            if _coconut_case_match_check_3:
                if _coconut_match_set_name_strategy_cls is not _coconut_sentinel:
                    strategy_cls = _coconut_case_match_to_3
            if _coconut_case_match_check_3 and not (callable(strategy_cls)):
                _coconut_case_match_check_3 = False
            if _coconut_case_match_check_3:
                self.strategy = strategy_cls(**strategy_kwargs)
        if not _coconut_case_match_check_3:
            raise TypeError("unknown strategy {_coconut_format_0!r}".format(
                _coconut_format_0=(strategy)))
Example #25
    def run(self):
        """
        Run the optimization
        @return: Nothing
        """

        self.problem = VoltageOptimizationProblem(
            self.circuit,
            self.options,
            self.max_iter,
            callback=self.progress_signal.emit)

        # # (1) Optimization problem
        # # print(data.info)
        #
        # # (2) Experimental design
        # # Use a symmetric Latin hypercube with 2d + 1 samples
        # exp_des = SymmetricLatinHypercube(dim=self.problem.dim, npts=2 * self.problem.dim + 1)
        #
        # # (3) Surrogate model
        # # Use a cubic RBF interpolant with a linear tail
        # surrogate = RBFInterpolant(kernel=CubicKernel, tail=LinearTail, maxp=self.max_eval)
        #
        # # (4) Adaptive sampling
        # # Use DYCORS with 100d candidate points
        # adapt_samp = CandidateDYCORS(data=self, numcand=100 * self.dim)
        #
        # # Use the serial controller (uses only one thread)
        # controller = SerialController(self.objfunction)
        #
        # # (5) Use the synchronous strategy without non-bound constraints
        # strategy = SyncStrategyNoConstraints(worker_id=0,
        #                                      data=self,
        #                                      maxeval=self.max_eval,
        #                                      nsamples=1,
        #                                      exp_design=exp_des,
        #                                      response_surface=surrogate,
        #                                      sampling_method=adapt_samp)
        #
        # controller.strategy = strategy
        #
        # # Run the optimization strategy
        # result = controller.run()
        #
        # # Print the final result
        # print('Best value found: {0}'.format(result.value))
        # print('Best solution found: {0}'.format(np.array_str(result.params[0], max_line_width=np.inf, precision=5,
        #                                                      suppress_small=True)))

        num_threads = 4

        surrogate_model = GPRegressor(dim=self.problem.dim)
        sampler = SymmetricLatinHypercube(dim=self.problem.dim,
                                          num_pts=2 * (self.problem.dim + 1))

        # Create a strategy and a controller
        controller = ThreadController()
        controller.strategy = SRBFStrategy(max_evals=self.max_iter,
                                           opt_prob=self.problem,
                                           exp_design=sampler,
                                           surrogate=surrogate_model,
                                           asynchronous=True,
                                           batch_size=num_threads)

        print("Number of threads: {}".format(num_threads))
        print("Maximum number of evaluations: {}".format(self.max_iter))
        print("Strategy: {}".format(controller.strategy.__class__.__name__))
        print("Experimental design: {}".format(sampler.__class__.__name__))
        print("Surrogate: {}".format(surrogate_model.__class__.__name__))

        # Launch the threads and give them access to the objective function
        for _ in range(num_threads):
            worker = BasicWorkerThread(controller, self.problem.eval)
            controller.launch_worker(worker)

        # Run the optimization strategy
        result = controller.run()

        print('Best value found: {0}'.format(result.value))
        print('Best solution found: {0}\n'.format(
            np.array_str(result.params[0],
                         max_line_width=np.inf,
                         precision=4,
                         suppress_small=True)))

        self.solution = result.params[0]

        # Extract function values from the controller
        self.optimization_values = np.array(
            [o.value for o in controller.fevals])

        # send the finish signal
        self.progress_signal.emit(0.0)
        self.progress_text.emit('Done!')
        self.done_signal.emit()
Example #26
    def optimize(self):
        """Method used to run the Genetic algorithm

        :return: Returns the best individual and its function value
        :rtype: numpy.array, float
        """
        #  Initialize population
        if isinstance(self.start, np.ndarray):
            if self.start.shape[0] != self.nindividuals or \
                    self.start.shape[1] != self.nvariables:
                raise ValueError("Initial population has incorrect size")
            if any(np.min(self.start, axis=0) < self.lower_boundary) or \
                    any(np.max(self.start, axis=0) > self.upper_boundary):
                raise ValueError("Initial population is outside the domain")
            population = self.start
        elif self.start == "SLHD":
            exp_des = SymmetricLatinHypercube(
                self.nvariables, self.nindividuals)
            population = self.lower_boundary + exp_des.generate_points() * \
                (self.upper_boundary - self.lower_boundary)
        elif self.start == "LHD":
            exp_des = LatinHypercube(self.nvariables, self.nindividuals)
            population = self.lower_boundary + exp_des.generate_points() * \
                (self.upper_boundary - self.lower_boundary)
        elif self.start == "Random":
            population = self.lower_boundary + np.random.rand(
                self.nindividuals, self.nvariables) *\
                (self.upper_boundary - self.lower_boundary)
        else:
            raise ValueError("Unknown argument for initial population")

        new_population = []
        #  Round positions
        if len(self.integer_variables) > 0:
            new_population = np.copy(population)
            population[:, self.integer_variables] = np.round(
                population[:, self.integer_variables])
            for i in self.integer_variables:
                ind = np.where(population[:, i] < self.lower_boundary[i])
                population[ind, i] += 1
                ind = np.where(population[:, i] > self.upper_boundary[i])
                population[ind, i] -= 1

        #  Evaluate all individuals
        function_values = self.function(population)
        if len(function_values.shape) == 2:
            function_values = np.squeeze(np.asarray(function_values))

        # Save the best individual
        ind = np.argmin(function_values)
        best_individual = np.copy(population[ind, :])
        best_value = function_values[ind]

        if len(self.integer_variables) > 0:
            population = new_population

        # Main loop
        for _ in range(self.ngenerations):
            # Do tournament selection to select the parents
            competitors = np.random.randint(
                0, self.nindividuals,
                (self.nindividuals, self.tournament_size))
            ind = np.argmin(function_values[competitors], axis=1)
            winner_indices = np.zeros(self.nindividuals, dtype=int)
            for i in range(self.tournament_size):  # This loop is short
                winner_indices[np.where(ind == i)] = \
                    competitors[np.where(ind == i), i]

            parent1 = population[
                winner_indices[0:self.nindividuals//2], :]
            parent2 = population[
                winner_indices[self.nindividuals//2:self.nindividuals], :]

            # Averaging Crossover
            cross = np.where(np.random.rand(
                self.nindividuals//2) < self.p_cross)[0]
            nn = len(cross)  # Number of crossovers
            alpha = np.random.rand(nn, 1)

            # Create the new chromosomes
            parent1_new = np.multiply(alpha, parent1[cross, :]) + \
                np.multiply(1 - alpha, parent2[cross, :])
            parent2_new = np.multiply(alpha, parent2[cross, :]) + \
                np.multiply(1 - alpha, parent1[cross, :])
            parent1[cross, :] = parent1_new
            parent2[cross, :] = parent2_new
            population = np.concatenate((parent1, parent2))

            # Apply mutation
            scale_factors = self.sigma * (
                self.upper_boundary - self.lower_boundary)  # Scale
            perturbation = np.random.randn(
                self.nindividuals, self.nvariables)  # Generate perturbations
            perturbation = np.multiply(
                perturbation, scale_factors)  # Scale accordingly
            perturbation = np.multiply(perturbation, (
                np.random.rand(self.nindividuals,
                               self.nvariables) < self.p_mutation))

            population += perturbation  # Add perturbation
            population = np.maximum(np.reshape(
                self.lower_boundary, (1, self.nvariables)), population)
            population = np.minimum(np.reshape(
                self.upper_boundary, (1, self.nvariables)), population)

            # Round chromosomes
            new_population = []
            if len(self.integer_variables) > 0:
                new_population = np.copy(population)
                population = round_vars(population, self.integer_variables,
                                        self.lower_boundary,
                                        self.upper_boundary)

            # Keep the best individual
            population[0, :] = best_individual

            #  Evaluate all individuals
            function_values = self.function(population)
            if len(function_values.shape) == 2:
                function_values = np.squeeze(np.asarray(function_values))

            # Save the best individual
            ind = np.argmin(function_values)
            best_individual = np.copy(population[ind, :])
            best_value = function_values[ind]

            # Use the positions that are not rounded
            if len(self.integer_variables) > 0:
                population = new_population

        return best_individual, best_value
Example #27
    def pysot_cube(objective,
                   n_trials,
                   n_dim,
                   with_count=False,
                   method=None,
                   design=None):
        """ Minimize
        :param objective:
        :param n_trials:
        :param n_dim:
        :param with_count:
        :return:
        """
        logging.getLogger('pySOT').setLevel(logging.ERROR)

        num_threads = 1
        asynchronous = True

        max_evals = n_trials
        gp = GenericProblem(dim=n_dim, objective=objective)

        if design == 'latin':
            exp_design = LatinHypercube(dim=n_dim, num_pts=2 * (n_dim + 1))
        elif design == 'symmetric':
            exp_design = SymmetricLatinHypercube(dim=n_dim,
                                                 num_pts=2 * (n_dim + 1))
        elif design == 'factorial':
            exp_design = TwoFactorial(dim=n_dim)
        else:
            raise ValueError('design should be latin, symmetric or factorial')

        # Create a strategy and a controller
        #  SRBFStrategy, EIStrategy, DYCORSStrategy,RandomStrategy, LCBStrategy
        controller = ThreadController()
        if method.lower() == 'srbf':
            surrogate = RBFInterpolant(dim=n_dim,
                                       lb=np.array([0.0] * n_dim),
                                       ub=np.array([1.0] * n_dim),
                                       kernel=CubicKernel(),
                                       tail=LinearTail(n_dim))
            controller.strategy = SRBFStrategy(max_evals=max_evals,
                                               opt_prob=gp,
                                               exp_design=exp_design,
                                               surrogate=surrogate,
                                               asynchronous=asynchronous)
        elif method.lower() == 'ei':
            surrogate = GPRegressor(dim=n_dim,
                                    lb=np.array([0.0] * n_dim),
                                    ub=np.array([1.0] * n_dim))
            controller.strategy = EIStrategy(max_evals=max_evals,
                                             opt_prob=gp,
                                             exp_design=exp_design,
                                             surrogate=surrogate,
                                             asynchronous=asynchronous)
        elif method.lower() == 'dycors':
            surrogate = RBFInterpolant(dim=n_dim,
                                       lb=np.array([0.0] * n_dim),
                                       ub=np.array([1.0] * n_dim),
                                       kernel=CubicKernel(),
                                       tail=LinearTail(n_dim))
            controller.strategy = DYCORSStrategy(max_evals=max_evals,
                                                 opt_prob=gp,
                                                 exp_design=exp_design,
                                                 surrogate=surrogate,
                                                 asynchronous=asynchronous)
        elif method.lower() == 'lcb':
            surrogate = GPRegressor(dim=n_dim,
                                    lb=np.array([0.0] * n_dim),
                                    ub=np.array([1.0] * n_dim))
            controller.strategy = LCBStrategy(max_evals=max_evals,
                                              opt_prob=gp,
                                              exp_design=exp_design,
                                              surrogate=surrogate,
                                              asynchronous=asynchronous)
        elif method.lower() == 'random':
            controller.strategy = RandomStrategy(max_evals=max_evals,
                                                 opt_prob=gp)
        else:
            raise ValueError("Didn't recognize method passed to pysot")

        # Launch the threads and give them access to the objective function
        for _ in range(num_threads):
            worker = BasicWorkerThread(controller, gp.eval)
            controller.launch_worker(worker)

        # Run the optimization strategy
        result = controller.run()
        best_x = result.params[0].tolist()
        return (result.value, best_x,
                gp.feval_count) if with_count else (result.value, best_x)
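A hypothetical usage of this helper, assuming pysot_cube is accessible as a plain function in the current scope and that the objective is defined on the unit cube; the toy sphere objective below is purely for illustration.

def sphere(x):
    # toy objective used only to demonstrate the call signature
    return float(np.sum(np.asarray(x) ** 2))

value, best_x = pysot_cube(sphere, n_trials=50, n_dim=3,
                           method="dycors", design="symmetric")
print(value, best_x)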
Example #28
    def __init__(self,
                 worker_id,
                 data,
                 response_surface,
                 maxeval,
                 nsamples,
                 exp_design=None,
                 sampling_method=None,
                 archiving_method=None,
                 extra=None,
                 extra_vals=None,
                 store_sim=False):

        # Check stopping criterion
        self.start_time = time.time()
        if maxeval < 0:  # Time budget
            self.maxeval = np.inf
            self.time_budget = np.abs(maxeval)
        else:
            self.maxeval = maxeval
            self.time_budget = np.inf

        # Import problem information
        self.worker_id = worker_id
        self.data = data
        self.fhat = []
        if response_surface is None:
            for i in range(self.data.nobj):
                self.fhat.append(
                    RBFInterpolant(kernel=CubicKernel,
                                   tail=LinearTail,
                                   maxp=maxeval))  #MOPLS ONLY
        else:
            for i in range(self.data.nobj):
                response_surface.reset()  # Just to be sure!
                self.fhat.append(deepcopy(response_surface))  #MOPLS ONLY

        self.ncenters = nsamples
        self.nsamples = 1
        self.numinit = None
        self.extra = extra
        self.extra_vals = extra_vals
        self.store_sim = store_sim

        # Default to generate sampling points using Symmetric Latin Hypercube
        self.design = exp_design
        if self.design is None:
            if self.data.dim > 50:
                self.design = LatinHypercube(data.dim, data.dim + 1)
            else:
                self.design = SymmetricLatinHypercube(data.dim,
                                                      2 * (data.dim + 1))

        self.xrange = np.asarray(data.xup - data.xlow)

        # algorithm parameters
        self.sigma_min = 0.005
        self.sigma_max = 0.2
        self.sigma_init = 0.2

        self.failtol = max(5, data.dim)
        self.failcount = 0
        self.contol = 5
        self.numeval = 0
        self.status = 0
        self.sigma = 0
        self.resubmitter = RetryStrategy()
        self.xbest = None
        self.fbest = None
        self.fbest_old = None
        self.improvement_prev = 1

        # population of centers and long-term archive
        self.nd_archives = []
        self.new_pop = []
        self.sim_res = []
        if archiving_method is None:
            self.memory_archive = NonDominatedArchive(200)
        else:
            self.memory_archive = archiving_method
        self.evals = []
        self.maxfit = min(200, 20 * self.data.dim)
        self.d_thresh = 1.0

        # Set up search procedures and initialize
        self.sampling = sampling_method
        if self.sampling is None:
            self.sampling = EvolutionaryAlgorithm(data)

        self.check_input()

        # Start with first experimental design
        self.sample_initial()