def start(self, max_evals):
        """Starts a new pySOT run."""
        self.max_evals = max_evals  # Used below when building the strategy
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(
            dim=self.opt.dim,
            lb=self.opt.lb,
            ub=self.opt.ub,
            kernel=CubicKernel(),
            tail=LinearTail(self.opt.dim),
            eta=1e-4,
        )

        # Optimization strategy
        self.strategy = SRBFStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )
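
The fragments below come from separate example files and rely on module-level setup that is not shown. A minimal sketch of the assumed imports and globals, based on how the snippets use them and on pySOT 0.3.x's module layout (the exact values are illustrative):

import logging
import os

import numpy as np
from poap.controller import (BasicWorkerThread, SerialController,
                             ThreadController)
from pySOT.controller import CheckpointController
from pySOT.experimental_design import SymmetricLatinHypercube
from pySOT.optimization_problems import Ackley
from pySOT.strategy import SRBFStrategy
from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant

ackley = Ackley(dim=10)     # shared test problem
num_threads = 4             # worker threads per controller
max_evals = 500             # evaluation budget
fname = "checkpoint.pysot"  # checkpoint file for CheckpointController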
def init():
    print("\nInitializing run...")

    rbf = RBFInterpolant(
        dim=ackley.dim, kernel=CubicKernel(),
        tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(
        dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=rbf, asynchronous=True, batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Wrap controller in checkpoint object
    controller = CheckpointController(controller, fname=fname)
    result = controller.run()
    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
Example 3
def test_srbf_async():
    max_evals = 200
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True,
                                       batch_size=None)

    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)
    controller.run()

    check_strategy(controller)
def init():
    print("\nInitializing run...")
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = SerialController(ackley.eval)
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True)

    print("Number of workers: 1")
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Wrap controller in checkpoint object
    controller = CheckpointController(controller, fname=fname)
    result = controller.run()
    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
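
Because the run above is wrapped in a CheckpointController, an interrupted run can be picked up from the checkpoint file. A minimal sketch of the companion resume step, assuming pySOT's CheckpointController.resume API and the same fname:

def resume():
    print("\nResuming run...")
    controller = SerialController(ackley.eval)
    controller = CheckpointController(controller, fname=fname)
    result = controller.resume()
    print("Best value found: {0}".format(result.value))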
Example 5
def example_extra_vals():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_extra_vals.log"):
        os.remove("./logfiles/example_extra_vals.log")
    logging.basicConfig(filename="./logfiles/example_extra_vals.log",
                        level=logging.INFO)

    num_threads = 4
    max_evals = 500

    ackley = Ackley(dim=10)
    num_extra = 10
    extra = np.random.uniform(ackley.lb, ackley.ub, (num_extra, ackley.dim))
    extra_vals = np.nan * np.ones((num_extra, 1))
    for i in range(num_extra):  # Evaluate every second point
        if i % 2 == 0:
            extra_vals[i] = ackley.eval(extra[i, :])

    rbf = RBFInterpolant(dim=ackley.dim, kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=rbf, asynchronous=True, batch_size=num_threads,
        extra_points=extra, extra_vals=extra_vals)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Append the known function values to the POAP database since
    # POAP won't evaluate these points
    for i in range(len(extra_vals)):
        if not np.isnan(extra_vals[i]):
            record = EvalRecord(
                params=(np.ravel(extra[i, :]),), status='completed')
            record.value = extra_vals[i]
            record.feasible = True
            controller.fevals.append(record)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
def example_matlab_engine():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_matlab_engine.log"):
        os.remove("./logfiles/example_matlab_engine.log")
    logging.basicConfig(filename="./logfiles/example_matlab_engine.log",
                        level=logging.INFO)

    num_threads = 4
    max_evals = 500

    ackley = Ackley(dim=10)
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Use the thread controller with num_threads workers
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads
    for _ in range(num_threads):
        try:
            worker = MatlabWorker(controller)
            worker.matlab = matlab.engine.start_matlab()
            controller.launch_worker(worker)
        except Exception as e:
            print("\nERROR: Failed to initialize a MATLAB session.\n")
            print(str(e))
            return

    # Run the optimization strategy
    result = controller.run()

    # Print the final result
    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
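
The MatlabWorker used above is defined elsewhere in the example module. A minimal sketch of such a worker, assuming POAP's BaseWorkerThread API and an ackley.m implementation on the MATLAB path (both are assumptions, not shown in the original):

import matlab.engine  # requires MathWorks' MATLAB Engine API for Python
from poap.controller import BaseWorkerThread

class MatlabWorker(BaseWorkerThread):
    """Evaluate the objective inside a dedicated MATLAB session (sketch)."""

    def handle_eval(self, record):
        try:
            x = matlab.double(record.params[0].tolist())
            val = self.matlab.ackley(x)  # assumed MATLAB function name
            self.finish_success(record, val)
        except Exception:
            logging.info("MATLAB function evaluation failed")
            self.finish_cancelled(record)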
Example 7
def example_subprocess_files():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_subprocess_files.log"):
        os.remove("./logfiles/example_subprocess_files.log")
    logging.basicConfig(filename="./logfiles/example_subprocess_files.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 200")
    print("Sampling method: Candidate DYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    assert os.path.isfile(path), "You need to build sphere_ext_files"

    num_threads = 4
    max_evals = 200

    sphere = Sphere(dim=10)
    rbf = RBFInterpolant(dim=sphere.dim,
                         kernel=TPSKernel(),
                         tail=LinearTail(sphere.dim))
    slhd = SymmetricLatinHypercube(dim=sphere.dim,
                                   num_pts=2 * (sphere.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=sphere,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=False,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for i in range(num_threads):
        worker = CppSim(controller)
        worker.my_filename = str(i) + ".txt"
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
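
CppSim and the path global are defined in the surrounding example module; the worker in this example communicates with the binary through per-thread files (my_filename). A simpler stdout-based sketch, assuming POAP's ProcessWorkerThread and a binary at path that prints f(x) given a comma-separated point:

from subprocess import PIPE, Popen
from poap.controller import ProcessWorkerThread

class CppSim(ProcessWorkerThread):
    """Launch an external simulator for each evaluation (sketch)."""

    def handle_eval(self, record):
        arg = ",".join(str(v) for v in record.params[0])  # assumed input format
        self.process = Popen([path, arg], stdout=PIPE,
                             bufsize=1, universal_newlines=True)
        out = self.process.communicate()[0]
        try:
            self.finish_success(record, float(out))
        except ValueError:
            self.finish_cancelled(record)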
Example 8
def example_simple():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_simple.log"):
        os.remove("./logfiles/example_simple.log")
    logging.basicConfig(filename="./logfiles/example_simple.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 500")
    print("Sampling method: CandidateDYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    num_threads = 4
    max_evals = 500

    ackley = Ackley(dim=10)
    rbf = SurrogateUnitBox(RBFInterpolant(dim=ackley.dim,
                                          kernel=CubicKernel(),
                                          tail=LinearTail(ackley.dim)),
                           lb=ackley.lb,
                           ub=ackley.ub)
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
def example_subprocess_partial_info():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_subprocess_partial_info.log"):
        os.remove("./logfiles/example_subprocess_partial_info.log")
    logging.basicConfig(
        filename="./logfiles/example_subprocess_partial_info.log",
        level=logging.INFO)

    assert os.path.isfile(path), "You need to build sumfun_ext"

    num_threads = 4
    max_evals = 200

    sumfun = SumfunExt(dim=10)
    rbf = RBFInterpolant(dim=sumfun.dim,
                         lb=sumfun.lb,
                         ub=sumfun.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(sumfun.dim))
    slhd = SymmetricLatinHypercube(dim=sumfun.dim,
                                   num_pts=2 * (sumfun.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=sumfun,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        controller.launch_worker(CppSim(controller))

    # Run the optimization strategy
    result = controller.run()

    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example 10
def example_mars():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_mars.log"):
        os.remove("./logfiles/example_mars.log")
    logging.basicConfig(filename="./logfiles/example_mars.log",
                        level=logging.INFO)

    num_threads = 4
    max_evals = 200

    ackley = Ackley(dim=5)
    try:
        mars = MARSInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub)
    except Exception as e:
        print(str(e))
        return
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=mars,
                                       asynchronous=True,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(mars.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example 11
def main_master(num_workers):
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_subprocess_mpi.log"):
        os.remove("./logfiles/test_subprocess_mpi.log")
    logging.basicConfig(filename="./logfiles/test_subprocess_mpi.log",
                        level=logging.INFO)

    print("\nTesting the POAP MPI controller with {0} workers".format(
        num_workers))
    print("Maximum number of evaluations: 200")
    print("Search strategy: Candidate DYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")

    assert os.path.isfile(path), "You need to build sphere_ext"

    max_evals = 200

    sphere = Sphere(dim=10)
    rbf = RBFInterpolant(dim=sphere.dim,
                         kernel=CubicKernel(),
                         tail=LinearTail(sphere.dim))
    slhd = SymmetricLatinHypercube(dim=sphere.dim,
                                   num_pts=2 * (sphere.dim + 1))

    # Create a strategy and a controller
    strategy = SRBFStrategy(max_evals=max_evals,
                            opt_prob=sphere,
                            exp_design=slhd,
                            surrogate=rbf,
                            asynchronous=True,
                            batch_size=num_workers)
    controller = MPIController(strategy)

    print("Number of threads: {}".format(num_workers))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Run the optimization strategy
    result = controller.run()
    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
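
Note that MPIController comes from poap.mpiserve, so this script must be launched under MPI (for example with mpiexec), with one rank acting as the master and the remaining ranks as workers; num_workers is expected to match the number of worker ranks.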
Example 12
def test_srbf_serial():
    max_evals = 200
    rbf = RBFInterpolant(
        dim=ackley.dim, kernel=CubicKernel(),
        tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(
        dim=ackley.dim, num_pts=2*(ackley.dim+1))

    # Create a strategy and a controller
    controller = SerialController(ackley.eval)
    controller.strategy = SRBFStrategy(
        max_evals=max_evals, opt_prob=ackley, exp_design=slhd,
        surrogate=rbf, asynchronous=True)
    controller.run()

    check_strategy(controller)
Example 13
def test_example_simple():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/example_simple.log"):
        os.remove("./logfiles/example_simple.log")
    logging.basicConfig(filename="./logfiles/example_simple.log",
                        level=logging.INFO)

    num_threads = 2
    max_evals = 50

    ackley = Ackley(dim=10)
    rbf = RBFInterpolant(dim=ackley.dim,
                         lb=ackley.lb,
                         ub=ackley.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim,
                                   num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=ackley,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example 14
def main_master(opt_prob, num_workers):
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/mpiexample_mpi.log"):
        os.remove("./logfiles/mpiexample_mpi.log")
    logging.basicConfig(filename="./logfiles/mpiexample_mpi.log",
                        level=logging.INFO)

    max_evals = 500

    rbf = RBFInterpolant(dim=opt_prob.dim,
                         lb=opt_prob.lb,
                         ub=opt_prob.ub,
                         kernel=CubicKernel(),
                         tail=LinearTail(opt_prob.dim))
    slhd = SymmetricLatinHypercube(dim=opt_prob.dim,
                                   num_pts=2 * (opt_prob.dim + 1))

    # Create a strategy and a controller
    strategy = SRBFStrategy(
        max_evals=max_evals,
        opt_prob=opt_prob,
        exp_design=slhd,
        surrogate=rbf,
        asynchronous=True,
        batch_size=num_workers,
    )
    controller = MPIController(strategy)

    print("Number of workers: {}".format(num_workers))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    result = controller.run()
    print("Best value found: {0}".format(result.value))
    print("Best solution found: {0}\n".format(
        np.array_str(result.params[0],
                     max_line_width=np.inf,
                     precision=5,
                     suppress_small=True)))
Example 15
def pysot_cube(objective, scale, n_trials, n_dim, with_count=False):
    # Optional file logging, disabled by default
    setup_logging = False
    if setup_logging:
        if not os.path.exists("./logfiles"):
            os.makedirs("logfiles")
        if os.path.exists("./logfiles/example_simple.log"):
            os.remove("./logfiles/example_simple.log")
        logging.basicConfig(filename="./logfiles/example_simple.log",
                            level=logging.INFO)

    num_threads = 2
    max_evals = n_trials
    gp = GenericProblem(dim=n_dim, objective=objective, scale=scale)
    rbf = RBFInterpolant(dim=n_dim,
                         lb=np.array([-scale] * n_dim),
                         ub=np.array([scale] * n_dim),
                         kernel=CubicKernel(),
                         tail=LinearTail(n_dim))
    slhd = SymmetricLatinHypercube(dim=n_dim, num_pts=2 * (n_dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=max_evals,
                                       opt_prob=gp,
                                       exp_design=slhd,
                                       surrogate=rbf,
                                       asynchronous=True)

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, gp.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()
    return (result.value, gp.feval_count) if with_count else result.value
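
pysot_cube depends on a GenericProblem class that is not shown. A minimal sketch consistent with how it is used above (the dim/objective/scale arguments and the feval_count attribute) and with pySOT's OptimizationProblem interface:

from pySOT.optimization_problems import OptimizationProblem

class GenericProblem(OptimizationProblem):
    """Wrap a plain objective function as a pySOT problem (sketch)."""

    def __init__(self, dim, objective, scale):
        self.dim = dim
        self.lb = -scale * np.ones(dim)
        self.ub = scale * np.ones(dim)
        self.int_var = np.array([], dtype=int)
        self.cont_var = np.arange(dim)
        self.objective = objective
        self.feval_count = 0

    def eval(self, x):
        self.feval_count += 1
        return self.objective(x)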
Example 16
class tuSOTOptimizer(AbstractOptimizer):
    primary_import = "pysot"

    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.create_opt_prob()  # Sets up the optimization problem (needs self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.turbo_batch_size = None
        self.pysot_batch_size = None
        self.history = []
        self.proposals = []
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)

        self.turbo = Turbo1(
            f=None,
            lb=self.bounds[:, 0],
            ub=self.bounds[:, 1],
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=4,  # We need to update this later
            verbose=False,
        )
        self.meta_init = self.get_init_hp()

    def get_init_hp(self):
        api_config = self.api_config
        feature = get_api_config_feature(api_config)
        result, values = [], []
        for i in range(9):
            model = joblib.load(
                './lgb_meta_model_{}.pkl'.format(
                    i))
            value = model.predict(np.array(feature).reshape(1, -1))[-1]
            values.append(value)
            result.append(math.e ** value)
        result_dict = {}
        for i, (k, v) in enumerate(api_config.items()):
            if v['type'] == "real":
                lower, upper = v["range"][0], v["range"][1]
                if lower < result[i] < upper:
                    out = result[i]
                else:
                    out = np.random.uniform(lower, upper)
            elif v['type'] == "int":
                lower, upper = v["range"][0], v["range"][1]
                if lower < result[i] < upper:
                    out = int(result[i])
                else:
                    out = np.random.randint(lower, upper)
            elif v['type'] == "bool":
                out = int(result[i]) >= 1
            else:
                out = api_config[k]["values"][0]
            result_dict[k] = out
        return result[:len(api_config)], values[:len(api_config)], result_dict

    def restart(self):
        self.turbo._restart()
        self.turbo._X = np.zeros((0, self.turbo.dim))
        self.turbo._fX = np.zeros((0, 1))
        X_init = latin_hypercube(self.turbo.n_init, self.dim)
        self.X_init = from_unit_cube(X_init, self.lb, self.ub)

    def create_opt_prob(self):
        """Create an optimization problem object."""
        opt = OptimizationProblem()
        opt.lb = self.bounds[:, 0]  # In warped space
        opt.ub = self.bounds[:, 1]  # In warped space
        opt.dim = len(self.bounds)
        opt.cont_var = np.arange(len(self.bounds))
        opt.int_var = []
        assert len(opt.cont_var) + len(opt.int_var) == opt.dim
        opt.objfun = None
        self.opt = opt

    def start(self):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.pysot_batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim,
                             kernel=CubicKernel(),
                             tail=LinearTail(self.opt.dim),
                             eta=1e-4)
        rbf = SurrogateUnitBox(rbf, lb=self.opt.lb, ub=self.opt.ub)

        # Optimization strategy
        self.strategy = SRBFStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )

    def pysot_suggest(self, n_suggestions=1):
        if self.pysot_batch_size is None:  # First call to suggest
            self.pysot_batch_size = n_suggestions
            self.start()

        # Set the tolerances as if we were running in batch mode
        d, p = float(self.opt.dim), float(n_suggestions)
        self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

        # Now we can make suggestions
        x_w = []
        self.proposals = []
        for _ in range(n_suggestions):
            proposal = self.strategy.propose_action()
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()  # This triggers all the callbacks

            # It is possible that pySOT proposes a previously evaluated point
            # when all variables are integers, so we just abort in this case
            # since we have likely converged anyway. See PySOT issue #30.
            x = list(proposal.record.params)  # From tuple to list
            x_unwarped, = self.space_x.unwarp(x)
            if x_unwarped in self.history:
                warnings.warn("pySOT proposed the same point twice")
                self.start()
                return self.suggest(n_suggestions=n_suggestions)

            # NOTE: Append unwarped to avoid rounding issues
            self.history.append(copy(x_unwarped))
            self.proposals.append(proposal)
            x_w.append(copy(x_unwarped))

        return x_w

    def turbo_suggest(self, n_suggestions=1):
        if self.turbo_batch_size is None:  # Remember the batch size on the first call to suggest
            self.turbo_batch_size = n_suggestions
            self.turbo.batch_size = n_suggestions
            self.turbo.failtol = np.ceil(np.max([4.0 / self.turbo_batch_size, self.dim / self.turbo_batch_size]))
            self.turbo.n_init = max([self.turbo.n_init, self.turbo_batch_size])
            self.restart()

        X_next = np.zeros((n_suggestions, self.dim))

        # Pick from the initial points
        n_init = min(len(self.X_init), n_suggestions)
        if n_init > 0:
            X_next[:n_init] = deepcopy(self.X_init[:n_init, :])
            self.X_init = self.X_init[n_init:, :]  # Remove these pending points

        # Get remaining points from TuRBO
        n_adapt = n_suggestions - n_init
        if n_adapt > 0:
            if len(self.turbo._X) > 0:  # Use random points if we can't fit a GP
                X = to_unit_cube(deepcopy(self.turbo._X), self.lb, self.ub)
                fX = copula_standardize(deepcopy(self.turbo._fX).ravel())  # Use Copula
                X_cand, y_cand, _ = self.turbo._create_candidates(
                    X, fX, length=self.turbo.length, n_training_steps=100, hypers={}
                )
                X_next[-n_adapt:, :] = self.turbo._select_candidates(X_cand, y_cand)[:n_adapt, :]
                X_next[-n_adapt:, :] = from_unit_cube(X_next[-n_adapt:, :], self.lb, self.ub)
        # Unwarp the suggestions; the last one is replaced by the
        # meta-learned initial guess from get_init_hp
        suggestions = self.space_x.unwarp(X_next)
        suggestions[-1] = self.meta_init[2]
        return suggestions

    def suggest(self, n_suggestions=1):
        if n_suggestions == 1:
            return self.turbo_suggest(n_suggestions)
        else:
            suggestion = n_suggestions // 2
            return self.turbo_suggest(suggestion) + self.pysot_suggest(n_suggestions - suggestion)

    def _observe(self, x, y):
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        if np.any(idx):
            i = np.argwhere(idx)[0].item()  # Pick the first index if there are ties
            proposal = self.proposals[i]
            proposal.record.complete(y)
            self.proposals.pop(i)
            self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)

        for x_, y_ in zip(X, y):
            # Just ignore any non-finite observations; unclear if this is the right thing to do
            if np.isfinite(y_):
                self._observe(x_, y_)

        XX, yy = self.space_x.warp(X), np.array(y)[:, None]

        if len(self.turbo._fX) >= self.turbo.n_init:
            self.turbo._adjust_length(yy)

        self.turbo.n_evals += self.turbo_batch_size

        self.turbo._X = np.vstack((self.turbo._X, deepcopy(XX)))
        self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(yy)))
        self.turbo.X = np.vstack((self.turbo.X, deepcopy(XX)))
        self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(yy)))

        # Check for a restart
        if self.turbo.length < self.turbo.length_min:
            self.restart()
Example 17
File: pysot.py Project: evhub/bbopt
    def setup_backend(
        self,
        params,
        strategy="SRBF",
        surrogate="RBF",
        design=None,
    ):
        self.opt_problem = BBoptOptimizationProblem(params)

        # Select the experimental design
        design_kwargs = dict(dim=self.opt_problem.dim)
        if design is None:
            self.exp_design = EmptyExperimentalDesign(**design_kwargs)
        elif design == "latin_hypercube":
            self.exp_design = LatinHypercube(
                num_pts=2 * (self.opt_problem.dim + 1), **design_kwargs)
        elif design == "symmetric_latin_hypercube":
            self.exp_design = SymmetricLatinHypercube(
                num_pts=2 * (self.opt_problem.dim + 1), **design_kwargs)
        elif design == "two_factorial":
            self.exp_design = TwoFactorial(**design_kwargs)
        elif callable(design):
            self.exp_design = design(**design_kwargs)
        else:
            raise TypeError(
                "unknown experimental design {!r}".format(design))

        # Select the surrogate model
        surrogate_kwargs = dict(dim=self.opt_problem.dim,
                                lb=self.opt_problem.lb,
                                ub=self.opt_problem.ub)
        if surrogate == "RBF":
            self.surrogate = RBFInterpolant(
                kernel=LinearKernel() if design is None else CubicKernel(),
                tail=ConstantTail(self.opt_problem.dim)
                if design is None else LinearTail(self.opt_problem.dim),
                **surrogate_kwargs)
        elif surrogate == "GP":
            self.surrogate = GPRegressor(**surrogate_kwargs)
        elif callable(surrogate):
            self.surrogate = surrogate(**surrogate_kwargs)
        else:
            raise TypeError("unknown surrogate {!r}".format(surrogate))

        # Select the optimization strategy
        strategy_kwargs = dict(max_evals=sys.maxsize,
                               opt_prob=self.opt_problem,
                               exp_design=self.exp_design,
                               surrogate=self.surrogate,
                               asynchronous=True,
                               batch_size=1)
        if strategy == "SRBF":
            self.strategy = SRBFStrategy(**strategy_kwargs)
        elif strategy == "EI":
            self.strategy = EIStrategy(**strategy_kwargs)
        elif strategy == "DYCORS":
            self.strategy = DYCORSStrategy(**strategy_kwargs)
        elif strategy == "LCB":
            self.strategy = LCBStrategy(**strategy_kwargs)
        elif callable(strategy):
            self.strategy = strategy(**strategy_kwargs)
        else:
            raise TypeError("unknown strategy {!r}".format(strategy))
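
A hypothetical call, assuming a backend instance and a bbopt params dict (the names here are illustrative):

backend.setup_backend(params, strategy="DYCORS", surrogate="GP",
                      design="symmetric_latin_hypercube")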
Example 18
    def run(self):
        """
        Run the optimization
        @return: Nothing
        """

        self.problem = VoltageOptimizationProblem(
            self.circuit,
            self.options,
            self.max_iter,
            callback=self.progress_signal.emit)

        num_threads = 4

        surrogate_model = GPRegressor(dim=self.problem.dim)
        sampler = SymmetricLatinHypercube(dim=self.problem.dim,
                                          num_pts=2 * (self.problem.dim + 1))

        # Create a strategy and a controller
        controller = ThreadController()
        controller.strategy = SRBFStrategy(max_evals=self.max_iter,
                                           opt_prob=self.problem,
                                           exp_design=sampler,
                                           surrogate=surrogate_model,
                                           asynchronous=True,
                                           batch_size=num_threads)

        print("Number of threads: {}".format(num_threads))
        print("Maximum number of evaluations: {}".format(self.max_iter))
        print("Strategy: {}".format(controller.strategy.__class__.__name__))
        print("Experimental design: {}".format(sampler.__class__.__name__))
        print("Surrogate: {}".format(surrogate_model.__class__.__name__))

        # Launch the threads and give them access to the objective function
        for _ in range(num_threads):
            worker = BasicWorkerThread(controller, self.problem.eval)
            controller.launch_worker(worker)

        # Run the optimization strategy
        result = controller.run()

        print('Best value found: {0}'.format(result.value))
        print('Best solution found: {0}\n'.format(
            np.array_str(result.params[0],
                         max_line_width=np.inf,
                         precision=4,
                         suppress_small=True)))

        self.solution = result.params[0]

        # Extract function values from the controller
        self.optimization_values = np.array(
            [o.value for o in controller.fevals])

        # Send the finished signal
        self.progress_signal.emit(0.0)
        self.progress_text.emit('Done!')
        self.done_signal.emit()
Example 19
class tuSOTOptimizer(AbstractOptimizer):
    primary_import = "pysot"

    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.create_opt_prob()  # Sets up the optimization problem (needs self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.turbo_batch_size = None
        self.pysot_batch_size = None
        self.history = []
        self.proposals = []
        self.lb, self.ub = self.bounds[:, 0], self.bounds[:, 1]
        self.dim = len(self.bounds)

        self.turbo = Turbo1(
            f=None,
            lb=self.bounds[:, 0],
            ub=self.bounds[:, 1],
            n_init=2 * self.dim + 1,
            max_evals=self.max_evals,
            batch_size=4,  # We need to update this later
            verbose=False,
        )

        # hyperopt
        self.random = np_random

        space, self.round_to_values = tuSOTOptimizer.get_hyperopt_dimensions(
            api_config)
        self.domain = Domain(dummy_f, space, pass_expr_memo_ctrl=None)
        self.trials = Trials()

        # Some bookkeeping, like the opentuner wrapper
        self.trial_id_lookup = {}

        # Store just for data validation
        self.param_set_chk = frozenset(api_config.keys())

    def restart(self):
        self.turbo._restart()
        self.turbo._X = np.zeros((0, self.turbo.dim))
        self.turbo._fX = np.zeros((0, 1))
        X_init = latin_hypercube(self.turbo.n_init, self.dim)
        self.X_init = from_unit_cube(X_init, self.lb, self.ub)

    def create_opt_prob(self):
        """Create an optimization problem object."""
        opt = OptimizationProblem()
        opt.lb = self.bounds[:, 0]  # In warped space
        opt.ub = self.bounds[:, 1]  # In warped space
        opt.dim = len(self.bounds)
        opt.cont_var = np.arange(len(self.bounds))
        opt.int_var = []
        assert len(opt.cont_var) + len(opt.int_var) == opt.dim
        opt.objfun = None
        self.opt = opt

    def start(self):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.pysot_batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim,
                             kernel=CubicKernel(),
                             tail=LinearTail(self.opt.dim),
                             eta=1e-4)
        rbf = SurrogateUnitBox(rbf, lb=self.opt.lb, ub=self.opt.ub)

        # Optimization strategy
        self.strategy = SRBFStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )

    @staticmethod
    def hashable_dict(d):
        hashable_object = frozenset(d.items())
        return hashable_object

    @staticmethod
    def get_hyperopt_dimensions(api_config):
        param_list = sorted(api_config.keys())

        space = {}
        round_to_values = {}
        for param_name in param_list:
            param_config = api_config[param_name]

            param_type = param_config["type"]

            param_space = param_config.get("space", None)
            param_range = param_config.get("range", None)
            param_values = param_config.get("values", None)

            # Some setup for case that whitelist of values is provided:
            values_only_type = param_type in ("cat", "ordinal")
            if (param_values is not None) and (not values_only_type):
                assert param_range is None
                param_values = np.unique(param_values)
                param_range = (param_values[0], param_values[-1])
                round_to_values[param_name] = interp1d(
                    param_values,
                    param_values,
                    kind="nearest",
                    fill_value="extrapolate")

            if param_type == "int":
                low, high = param_range
                if param_space in ("log", "logit"):
                    space[param_name] = hp.qloguniform(param_name, np.log(low),
                                                       np.log(high), 1)
                else:
                    space[param_name] = hp.quniform(param_name, low, high, 1)
            elif param_type == "bool":
                assert param_range is None
                assert param_values is None
                space[param_name] = hp.choice(param_name, (False, True))
            elif param_type in ("cat", "ordinal"):
                assert param_range is None
                space[param_name] = hp.choice(param_name, param_values)
            elif param_type == "real":
                low, high = param_range
                if param_space in ("log", "logit"):
                    space[param_name] = hp.loguniform(param_name, np.log(low),
                                                      np.log(high))
                else:
                    space[param_name] = hp.uniform(param_name, low, high)
            else:
                assert False, "type %s not handled in API" % param_type

        return space, round_to_values

    def get_trial(self, trial_id):
        for trial in self.trials._dynamic_trials:
            if trial["tid"] == trial_id:
                assert isinstance(trial, dict)
                # Make sure right kind of dict
                assert "state" in trial and "result" in trial
                assert trial["state"] == JOB_STATE_NEW
                return trial
        assert False, "No matching trial ID"

    def cleanup_guess(self, x_guess):
        assert isinstance(x_guess, dict)
        # Also, check the keys are only the vars we are searching over:
        assert frozenset(x_guess.keys()) == self.param_set_chk

        # Do the rounding
        # Make a copy to be safe, and also unpack singletons
        # We may also need to consider clip_chk at some point like opentuner
        x_guess = {k: only(x_guess[k]) for k in x_guess}
        for param_name, round_f in self.round_to_values.items():
            x_guess[param_name] = round_f(x_guess[param_name])
        # Also ensure this is correct dtype so sklearn is happy
        x_guess = {
            k: DTYPE_MAP[self.api_config[k]["type"]](x_guess[k])
            for k in x_guess
        }
        return x_guess

    def pysot_suggest(self, n_suggestions=1):
        if self.pysot_batch_size is None:  # First call to suggest
            self.pysot_batch_size = n_suggestions
            self.start()

        # Set the tolerances as if we were running in batch mode
        d, p = float(self.opt.dim), float(n_suggestions)
        self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))

        # Now we can make suggestions
        x_w = []
        self.proposals = []
        for _ in range(n_suggestions):
            proposal = self.strategy.propose_action()
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()  # This triggers all the callbacks

            # It is possible that pySOT proposes a previously evaluated point
            # when all variables are integers, so we just abort in this case
            # since we have likely converged anyway. See PySOT issue #30.
            x = list(proposal.record.params)  # From tuple to list
            x_unwarped, = self.space_x.unwarp(x)
            if x_unwarped in self.history:
                warnings.warn("pySOT proposed the same point twice")
                self.start()
                return self.suggest(n_suggestions=n_suggestions)

            # NOTE: Append unwarped to avoid rounding issues
            self.history.append(copy(x_unwarped))
            self.proposals.append(proposal)
            x_w.append(copy(x_unwarped))

        return x_w

    def pysot_get_suggest(self, suggests):
        turbo_suggest_warps = self.space_x.warp(suggests)
        for i, warps in enumerate(turbo_suggest_warps):
            proposal = self.strategy.make_proposal(warps)
            proposal.add_callback(self.strategy.on_initial_proposal)
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()

            self.history.append(copy(suggests[i]))
            self.proposals.append(proposal)

    def turbo_suggest(self, n_suggestions=1):
        if self.turbo_batch_size is None:  # Remember the batch size on the first call to suggest
            self.turbo_batch_size = n_suggestions
            self.turbo.batch_size = n_suggestions
            self.turbo.failtol = np.ceil(
                np.max([
                    4.0 / self.turbo_batch_size,
                    self.dim / self.turbo_batch_size
                ]))
            self.turbo.n_init = max([self.turbo.n_init, self.turbo_batch_size])
            self.restart()

        X_next = np.zeros((n_suggestions, self.dim))

        # Pick from the initial points
        n_init = min(len(self.X_init), n_suggestions)
        if n_init > 0:
            X_next[:n_init] = deepcopy(self.X_init[:n_init, :])
            self.X_init = self.X_init[n_init:, :]  # Remove these pending points

        # Get remaining points from TuRBO
        n_adapt = n_suggestions - n_init
        if n_adapt > 0:
            if len(self.turbo._X) > 0:  # Use random points if we can't fit a GP
                X = to_unit_cube(deepcopy(self.turbo._X), self.lb, self.ub)
                fX = copula_standardize(deepcopy(self.turbo._fX).ravel())  # Use Copula
                X_cand, y_cand, _ = self.turbo._create_candidates(
                    X,
                    fX,
                    length=self.turbo.length,
                    n_training_steps=100,
                    hypers={})
                X_next[-n_adapt:, :] = self.turbo._select_candidates(
                    X_cand, y_cand)[:n_adapt, :]
                X_next[-n_adapt:, :] = from_unit_cube(X_next[-n_adapt:, :],
                                                      self.lb, self.ub)

        # Unwarp the suggestions
        suggestions = self.space_x.unwarp(X_next)
        return suggestions

    def _hyperopt_suggest(self):
        new_ids = self.trials.new_trial_ids(1)
        assert len(new_ids) == 1
        self.trials.refresh()

        seed = random_seed(self.random)
        new_trials = tpe.suggest(new_ids, self.domain, self.trials, seed)
        assert len(new_trials) == 1

        self.trials.insert_trial_docs(new_trials)
        self.trials.refresh()

        new_trial, = new_trials  # extract singleton
        return new_trial

    def _hyperopt_transform(self, x):
        # Build a hyperopt trial document for an externally chosen point `x`,
        # so it can be inserted into self.trials as if TPE had proposed it.
        new_id = self.trials.new_trial_ids(1)[0]

        domain = self.domain
        rng = np.random.RandomState(1)  # Fixed seed; samples are overwritten below
        idxs, vals = pyll.rec_eval(domain.s_idxs_vals,
                                   memo={
                                       domain.s_new_ids: [new_id],
                                       domain.s_rng: rng,
                                   })
        rval_miscs = [dict(tid=new_id, cmd=domain.cmd, workdir=domain.workdir)]
        rval_results = domain.new_result()
        # Replace the randomly sampled values with the requested point
        for k in vals:
            vals[k][0] = x[k]
        miscs_update_idxs_vals(rval_miscs, idxs, vals)
        rval_docs = self.trials.new_trial_docs([new_id], [None], rval_results,
                                               rval_miscs)

        return rval_docs[0]

    def hyperopt_suggest(self, n_suggestions=1):
        assert n_suggestions >= 1, "invalid value for n_suggestions"

        # Get the new trials; hyperopt either does random search or guesses
        # one point at a time anyway, so we might as well call it serially.
        new_trials = [self._hyperopt_suggest() for _ in range(n_suggestions)]

        X = []
        for trial in new_trials:
            x_guess = self.cleanup_guess(trial["misc"]["vals"])
            X.append(x_guess)

            # Build lookup to get original trial object
            x_guess_ = tuSOTOptimizer.hashable_dict(x_guess)
            assert x_guess_ not in self.trial_id_lookup, \
                "the suggestions should not already be in the trial dict"
            self.trial_id_lookup[x_guess_] = trial["tid"]

        assert len(X) == n_suggestions
        return X
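
    # NOTE (sketch): `hashable_dict` is a static helper defined on this class
    # (not shown in this snippet); it freezes a guess dict, e.g. something
    # like `frozenset(d.items())`, so guesses can key `trial_id_lookup`.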

    def hyperopt_get_suggest(self, suggests):
        trials = [self._hyperopt_transform(x) for x in suggests]
        for trial in trials:
            x_guess = self.cleanup_guess(trial["misc"]["vals"])
            x_guess_ = tuSOTOptimizer.hashable_dict(x_guess)
            assert x_guess_ not in self.trial_id_lookup, \
                "the suggestions should not already be in the trial dict"
            self.trial_id_lookup[x_guess_] = trial["tid"]
        self.trials.insert_trial_docs(trials)
        self.trials.refresh()

    def suggest(self, n_suggestions=1):
        if n_suggestions == 1:
            return self.turbo_suggest(n_suggestions)
        else:
            t_suggestion = n_suggestions // 2
            # p_suggestion = int((n_suggestions - t_suggestion) * 3/4)
            h_suggestion = n_suggestions - t_suggestion
            turbo_suggest = self.turbo_suggest(t_suggestion)
            # pysot_suggest = self.pysot_suggest(p_suggestion)
            hyperopt_suggest = self.hyperopt_suggest(h_suggestion)
            self.hyperopt_get_suggest(turbo_suggest)
            # self.pysot_get_suggest(turbo_suggest + hyperopt_suggest)
            return turbo_suggest + hyperopt_suggest

    def _observe(self, x, y):
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        if np.any(idx):
            i = np.argwhere(idx)[0].item()  # Pick the first index if there are ties
            proposal = self.proposals[i]
            proposal.record.complete(y)
            self.proposals.pop(i)
            self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)

        # # pysot observe
        # for x_, y_ in zip(X, y):
        #     # Just ignore any inf observations we got; unclear if right thing
        #     if np.isfinite(y_):
        #         self._observe(x_, y_)

        # turbo observe
        XX, yy = self.space_x.warp(X), np.array(y)[:, None]

        if len(self.turbo._fX) >= self.turbo.n_init:
            self.turbo._adjust_length(yy)

        self.turbo.n_evals += self.turbo_batch_size

        self.turbo._X = np.vstack((self.turbo._X, deepcopy(XX)))
        self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(yy)))
        self.turbo.X = np.vstack((self.turbo.X, deepcopy(XX)))
        self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(yy)))

        # Check for a restart
        if self.turbo.length < self.turbo.length_min:
            self.restart()

        # hyperopt observe
        for x_guess, y_ in zip(X, y):
            x_guess_ = tuSOTOptimizer.hashable_dict(x_guess)
            assert x_guess_ in self.trial_id_lookup, \
                "Appears to be a guess that did not originate from suggest"
            trial_id = self.trial_id_lookup.pop(x_guess_)
            trial = self.get_trial(trial_id)
            assert self.cleanup_guess(trial["misc"]["vals"]) == x_guess, \
                "trial ID not consistent with stored x values"

            # Cast to float to ensure native type
            result = {"loss": float(y_), "status": STATUS_OK}
            trial["state"] = JOB_STATE_DONE
            trial["result"] = result
        self.trials.refresh()
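
# A minimal driver sketch for the ensemble above, assuming a bayesmark-style
# api_config; `tuSOTOptimizer` is the class defined in this snippet, and the
# toy quadratic loss stands in for a real objective.
api_config = {"x": {"type": "real", "space": "linear", "range": (0.0, 1.0)},
              "y": {"type": "real", "space": "linear", "range": (0.0, 1.0)}}
opt = tuSOTOptimizer(api_config)
for _ in range(8):
    guesses = opt.suggest(n_suggestions=4)  # half from TuRBO, half from TPE
    losses = [(g["x"] - 0.3) ** 2 + (g["y"] - 0.7) ** 2 for g in guesses]
    opt.observe(guesses, losses)            # feeds both TuRBO and hyperopt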
Example n. 20
    def pysot_cube(objective,
                   n_trials,
                   n_dim,
                   with_count=False,
                   method=None,
                   design=None):
        """ Minimize
        :param objective:
        :param n_trials:
        :param n_dim:
        :param with_count:
        :return:
        """
        logging.getLogger('pySOT').setLevel(logging.ERROR)

        num_threads = 1
        asynchronous = True

        max_evals = n_trials
        gp = GenericProblem(dim=n_dim, objective=objective)

        if design == 'latin':
            exp_design = LatinHypercube(dim=n_dim, num_pts=2 * (n_dim + 1))
        elif design in (None, 'symmetric'):  # None defaults to the symmetric design
            exp_design = SymmetricLatinHypercube(dim=n_dim,
                                                 num_pts=2 * (n_dim + 1))
        elif design == 'factorial':
            exp_design = TwoFactorial(dim=n_dim)
        else:
            raise ValueError("design should be 'latin', 'symmetric' or 'factorial'")

        # Create a strategy and a controller
        #  Available: SRBFStrategy, EIStrategy, DYCORSStrategy, LCBStrategy, RandomStrategy
        controller = ThreadController()
        if method is None or method.lower() == 'srbf':  # Default to SRBF
            surrogate = RBFInterpolant(dim=n_dim,
                                       lb=np.array([0.0] * n_dim),
                                       ub=np.array([1.0] * n_dim),
                                       kernel=CubicKernel(),
                                       tail=LinearTail(n_dim))
            controller.strategy = SRBFStrategy(max_evals=max_evals,
                                               opt_prob=gp,
                                               exp_design=exp_design,
                                               surrogate=surrogate,
                                               asynchronous=asynchronous)
        elif method.lower() == 'ei':
            surrogate = GPRegressor(dim=n_dim,
                                    lb=np.array([0.0] * n_dim),
                                    ub=np.array([1.0] * n_dim))
            controller.strategy = EIStrategy(max_evals=max_evals,
                                             opt_prob=gp,
                                             exp_design=exp_design,
                                             surrogate=surrogate,
                                             asynchronous=asynchronous)
        elif method.lower() == 'dycors':
            surrogate = RBFInterpolant(dim=n_dim,
                                       lb=np.array([0.0] * n_dim),
                                       ub=np.array([1.0] * n_dim),
                                       kernel=CubicKernel(),
                                       tail=LinearTail(n_dim))
            controller.strategy = DYCORSStrategy(max_evals=max_evals,
                                                 opt_prob=gp,
                                                 exp_design=exp_design,
                                                 surrogate=surrogate,
                                                 asynchronous=asynchronous)
        elif method.lower() == 'lcb':
            surrogate = GPRegressor(dim=n_dim,
                                    lb=np.array([0.0] * n_dim),
                                    ub=np.array([1.0] * n_dim))
            controller.strategy = LCBStrategy(max_evals=max_evals,
                                              opt_prob=gp,
                                              exp_design=exp_design,
                                              surrogate=surrogate,
                                              asynchronous=asynchronous)
        elif method.lower() == 'random':
            controller.strategy = RandomStrategy(max_evals=max_evals,
                                                 opt_prob=gp)
        else:
            raise ValueError("Didn't recognize method passed to pysot")

        # Launch the threads and give them access to the objective function
        for _ in range(num_threads):
            worker = BasicWorkerThread(controller, gp.eval)
            controller.launch_worker(worker)

        # Run the optimization strategy
        result = controller.run()
        best_x = result.params[0].tolist()
        return (result.value, best_x,
                gp.feval_count) if with_count else (result.value, best_x)
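
# Quick smoke test of pysot_cube on a toy objective (assumes pysot_cube is
# callable as a plain function, as in the snippet above):
import numpy as np

def sphere(u):
    return float(np.sum((np.asarray(u) - 0.5) ** 2))  # minimum at the centre

best_val, best_x = pysot_cube(sphere, n_trials=30, n_dim=3,
                              method='srbf', design='symmetric')
print(best_val, best_x)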
Example n. 21
class PySOTOptimizer(AbstractOptimizer):
    primary_import = "pysot"

    def __init__(self, api_config):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)

        self.space_x = JointSpace(api_config)
        self.bounds = self.space_x.get_bounds()
        self.create_opt_prob()  # Sets up the optimization problem (needs self.bounds)
        self.max_evals = np.iinfo(np.int32).max  # NOTE: Largest possible int
        self.batch_size = None
        self.history = []
        self.proposals = []

    def create_opt_prob(self):
        """Create an optimization problem object."""
        opt = OptimizationProblem()
        opt.lb = self.bounds[:, 0]  # In warped space
        opt.ub = self.bounds[:, 1]  # In warped space
        opt.dim = len(self.bounds)
        opt.cont_var = np.arange(len(self.bounds))
        opt.int_var = []
        assert len(opt.cont_var) + len(opt.int_var) == opt.dim
        opt.objfun = None
        self.opt = opt

    def start(self, max_evals):
        """Starts a new pySOT run."""
        self.history = []
        self.proposals = []

        # Symmetric Latin hypercube design
        des_pts = max([self.batch_size, 2 * (self.opt.dim + 1)])
        slhd = SymmetricLatinHypercube(dim=self.opt.dim, num_pts=des_pts)

        # Warped RBF interpolant
        rbf = RBFInterpolant(dim=self.opt.dim, kernel=CubicKernel(), tail=LinearTail(self.opt.dim), eta=1e-4)
        rbf = SurrogateUnitBox(rbf, lb=self.opt.lb, ub=self.opt.ub)

        # Optimization strategy
        self.strategy = SRBFStrategy(
            max_evals=self.max_evals,
            opt_prob=self.opt,
            exp_design=slhd,
            surrogate=rbf,
            asynchronous=True,
            batch_size=1,
            use_restarts=True,
        )

    def suggest(self, n_suggestions=1):
        """Get a suggestion from the optimizer.

        Parameters
        ----------
        n_suggestions : int
            Desired number of parallel suggestions in the output

        Returns
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """

        if self.batch_size is None:  # First call to suggest
            self.batch_size = n_suggestions
            self.start(self.max_evals)

        # Set the failure tolerance as if we were running synchronous batches of size p
        d, p = float(self.opt.dim), float(n_suggestions)
        self.strategy.failtol = p * int(max(np.ceil(d / p), np.ceil(4 / p)))
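        # e.g. d = 8, p = 4  ->  failtol = 4 * max(ceil(8/4), ceil(4/4)) = 8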

        # Now we can make suggestions
        x_w = []
        self.proposals = []
        for _ in range(n_suggestions):
            proposal = self.strategy.propose_action()
            record = EvalRecord(proposal.args, status="pending")
            proposal.record = record
            proposal.accept()  # This triggers all the callbacks

            # It is possible that pySOT proposes a previously evaluated point
            # when all variables are integers, so we just abort in this case
            # since we have likely converged anyway. See PySOT issue #30.
            x = list(proposal.record.params)  # From tuple to list
            x_unwarped, = self.space_x.unwarp(x)
            if x_unwarped in self.history:
                warnings.warn("pySOT proposed the same point twice")
                self.start(self.max_evals)
                return self.suggest(n_suggestions=n_suggestions)

            # NOTE: Append unwarped to avoid rounding issues
            self.history.append(copy(x_unwarped))
            self.proposals.append(proposal)
            x_w.append(copy(x_unwarped))

        return x_w

    def _observe(self, x, y):
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        i = np.argwhere(idx)[0].item()  # Pick the first index if there are ties
        proposal = self.proposals[i]
        proposal.record.complete(y)
        self.proposals.pop(i)
        self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)

        for x_, y_ in zip(X, y):
            # Ignore any non-finite observations; unclear if this is the right thing to do
            if np.isfinite(y_):
                self._observe(x_, y_)
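
# Minimal usage sketch for the wrapper above (api_config assumed, in the same
# bayesmark format as elsewhere in this file). Note that the inf loss below is
# silently dropped by observe(), leaving its proposal pending.
api_config = {"lr": {"type": "real", "space": "log", "range": (1e-4, 1e-1)}}
opt = PySOTOptimizer(api_config)
guesses = opt.suggest(n_suggestions=2)
opt.observe(guesses, [0.7, float("inf")])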
Example n. 22
    def fit(self, X, y=None, **kwargs):
        """Run training with cross-validation.

        :param X: training data
        :param y: training targets, passed through to the estimator
        :param kwargs: extra fit parameters forwarded to GridSearchCV.fit
        """

        # wrap for pySOT
        class Target(OptimizationProblem):
            def __init__(self, outer):
                self.outer = outer
                param_def = outer.param_def
                self.lb = np.array([param['lb'] for param in param_def])
                self.ub = np.array([param['ub'] for param in param_def])
                self.dim = len(param_def)
                self.int_var = np.array([
                    idx for idx, param in enumerate(param_def)
                    if param['integer']
                ])
                self.cont_var = np.array([
                    idx for idx, param in enumerate(param_def)
                    if idx not in self.int_var
                ])

            def eval_(self, x):
                print('Eval {0} ...'.format(x))
                param_def = self.outer.param_def
                outer = self.outer
                # Build a single-point parameter grid for GridSearchCV
                param_grid = ({
                    param['name']:
                    [int(x[idx]) if param['integer'] else x[idx]]
                    for idx, param in enumerate(param_def)
                })
                # Use a single-candidate GridSearchCV purely to run the CV
                gs = GridSearchCV(outer.estimator,
                                  param_grid,
                                  refit=False,
                                  **outer.kwargs)
                # never refit during iteration, refit at the end
                gs.fit(X, y=y, **kwargs)
                gs_score = gs.best_score_
                # GridSearchCV scores are better when greater; compare against
                # None so a legitimate score of 0.0 is not overwritten
                if outer.best_score_ is None or gs_score > outer.best_score_:
                    outer.best_score_ = gs_score
                    outer.best_params_ = gs.best_params_
                # also record history
                outer.params_history_.append(x)
                outer.score_history_.append(gs_score)
                print('Eval {0} => {1}'.format(x, gs_score))
                # pySOT minimizes, so return the negated score
                return -gs_score

        # pySOT routine
        # TODO: make this configurable
        target = Target(self)
        rbf = SurrogateUnitBox(RBFInterpolant(dim=target.dim,
                                              kernel=CubicKernel(),
                                              tail=LinearTail(target.dim)),
                               lb=target.lb,
                               ub=target.ub)
        slhd = SymmetricLatinHypercube(dim=target.dim,
                                       num_pts=2 * (target.dim + 1))

        # Create a strategy and a controller
        controller = SerialController(objective=target.eval_)
        controller.strategy = SRBFStrategy(max_evals=self.n_iter,
                                           batch_size=1,
                                           opt_prob=target,
                                           exp_design=slhd,
                                           surrogate=rbf,
                                           asynchronous=False)

        print('Maximum number of evaluations: {0}'.format(self.n_iter))
        print('Strategy: {0}'.format(controller.strategy.__class__.__name__))
        print('Experimental design: {0}'.format(slhd.__class__.__name__))
        print('Surrogate: {0}'.format(rbf.__class__.__name__))

        # Run the optimization strategy
        result = controller.run()

        print('Best value found: {0}'.format(result.value))
        print('Best solution found: {0}\n'.format(
            np.array_str(result.params[0],
                         max_line_width=np.inf,
                         precision=5,
                         suppress_small=True)))
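
# Hypothetical usage of the wrapper above; the enclosing class name and its
# constructor signature (estimator, param_def, n_iter, extra GridSearchCV
# kwargs) are inferred from the attributes used in fit(), so treat this as a
# sketch rather than the actual API:
from sklearn.ensemble import RandomForestClassifier

search = PySOTSearchCV(  # assumed class name
    estimator=RandomForestClassifier(),
    param_def=[
        {'name': 'n_estimators', 'lb': 10, 'ub': 200, 'integer': True},
        {'name': 'max_depth', 'lb': 2, 'ub': 16, 'integer': True},
    ],
    n_iter=20,
    cv=3,  # forwarded to the GridSearchCV constructor
)
search.fit(X_train, y_train)  # X_train / y_train assumed defined
print(search.best_score_, search.best_params_)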