Example #1
def test_multiretry(num_retries=min(256, 8*mp.cpu_count()), keep=0.7,
                    optimizer=de2_cma(1500), logger=logger(), repeat=10):
    seqs = Tandem(0).seqs
    n = len(seqs)
    problems = [Tandem(i) for i in range(n)]
    ids = [str(seqs[i]) for i in range(n)]
    t0 = time.perf_counter()
    for _ in range(repeat):
        # check all variants
        problem_stats = multiretry.minimize(problems, ids, num_retries, keep, optimizer, logger)
        ps = problem_stats[0]
        
#         for _ in range(10):
#             # improve the best variant using only one node
#             fval = ray.get(ps.retry.remote(optimizer))
#             logger.info("improve best variant " + ray.get(ps.name.remote()) 
#                         + ' ' + str(ray.get(ps.id.remote()))
#                         + ' ' + str(ray.get(ps.value.remote())) 
#                         + ' time = ' + str(dtime(t0)))
#             if fval < -1490:
#                 break           
            
        # optimize best variant starting from scratch using all nodes
        logger.info("improve best variant " + ray.get(ps.name.remote()) 
                    + ' ' + str(ray.get(ps.id.remote()))
                    + ' ' + str(ray.get(ps.value.remote())) 
                    + ' time = ' + str(dtime(t0)))
        problem = problems[ray.get(ps.index.remote())]
        _rayoptimizer(optimizer, problem, 1, max_time = 1200, log = logger)
Example #2
def _test_archipelago(algo, problem, num=10000, stop_val=-1E99, log=logger()):
    udp = pygmo_udp(problem.fun, problem.bounds)
    prob = pg.problem(udp)
    best_y = math.inf
    best_x = None
    t0 = time.perf_counter()

    for _ in range(num):
        archi = pg.archipelago(n=mp.cpu_count(),
                               algo=algo,
                               prob=prob,
                               pop_size=64)
        archi.evolve()
        archi.wait()
        ys = archi.get_champions_f()
        if ys is not None and len(ys) > 0:
            sort = np.argsort([y[0] for y in ys])
            y = ys[sort[0]][0]
            if y < best_y:
                best_y = y
                best_x = archi.get_champions_x()[sort[0]]
                message = '{0} {1} {2} {3!s}'.format(problem.name, dtime(t0),
                                                     best_y, list(best_x))
                log.info(message)
                if best_y < stop_val:
                    break
    return OptimizeResult(x=best_x, fun=best_y, success=True)
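A minimal usage sketch for _test_archipelago, assuming an illustrative problem wrapper exposing fun, bounds and name; the wrapper, the Rosenbrock objective and the chosen pygmo algorithm are assumptions, not part of the original example.

# hedged usage sketch: Problem, the Rosenbrock objective and pg.sade are
# illustrative assumptions; _test_archipelago is defined above
import pygmo as pg
from scipy.optimize import Bounds, rosen

class Problem:
    def __init__(self, dim):
        self.name = 'rosen_' + str(dim)
        self.fun = rosen
        self.bounds = Bounds([-5.0] * dim, [5.0] * dim)

algo = pg.algorithm(pg.sade(gen=200))  # any pygmo algorithm can be plugged in
ret = _test_archipelago(algo, Problem(8), num=100, stop_val=1e-8)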
Example #3
def test_cma_original(problem, num):
    import cma
    best = math.inf
    lb = problem.bounds.lb
    ub = problem.bounds.ub
    t0 = time.perf_counter()
    for i in range(num):
        guess = random_x(problem.bounds.lb, problem.bounds.ub)
        es = cma.CMAEvolutionStrategy(
            guess, 0.1, {
                'bounds': [lb, ub],
                'popsize': 31,
                'typical_x': typical(lb, ub),
                'scaling_of_variables': scale(lb, ub),
                'verbose': -1,
                'verb_disp': -1
            })
        for j in range(50000):
            X, Y = es.ask_and_eval(problem.fun)
            es.tell(X, Y)
            if es.stop():
                break
        best = min(es.result.fbest, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}".format(
            i + 1, dtime(t0), best, es.result.fbest))
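Example #3 relies on three small helpers that are not shown; plausible implementations (assumptions, not necessarily the originals) would be:

# hedged sketch: plausible versions of the helpers test_cma_original assumes
import numpy as np

def random_x(lb, ub):
    # uniform random start point inside the box bounds
    lb, ub = np.asarray(lb), np.asarray(ub)
    return lb + np.random.rand(len(lb)) * (ub - lb)

def typical(lb, ub):
    # center of the box, used as 'typical_x' for CMA-ES
    return 0.5 * (np.asarray(lb) + np.asarray(ub))

def scale(lb, ub):
    # per-coordinate scaling proportional to the box width
    return 0.5 * (np.asarray(ub) - np.asarray(lb))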
Example #4
def minimize_plot(name,
                  optimizer,
                  fun,
                  bounds,
                  value_limit=math.inf,
                  plot_limit=math.inf,
                  num_retries=1024,
                  workers=mp.cpu_count(),
                  logger=logger(),
                  stop_fitness=-math.inf,
                  statistic_num=5000):
    time0 = time.perf_counter()  # optimization start time
    name += '_' + optimizer.name
    logger.info('optimize ' + name)
    store = Store(fun,
                  bounds,
                  capacity=500,
                  logger=logger,
                  statistic_num=statistic_num)
    ret = retry(store, optimizer.minimize, value_limit, workers, stop_fitness)
    impr = store.get_improvements()
    np.savez_compressed(name, ys=impr)
    filtered = np.array([imp for imp in impr if imp[1] < plot_limit])
    if len(filtered) > 0: impr = filtered
    logger.info(name + ' time ' + str(dtime(time0)))
    plot(impr,
         'progress_aret.' + name + '.png',
         label=name,
         xlabel='time in sec',
         ylabel=r'$f$')
    return ret
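A short usage sketch for minimize_plot, assuming the fcmaes de_cma optimizer (also used in Example #30) and the Rosenbrock function as an illustrative objective; objective, dimension and bounds are assumptions.

# hedged usage sketch: objective and bounds are illustrative assumptions
from scipy.optimize import Bounds, rosen
from fcmaes.optimizer import de_cma

bounds = Bounds([-5.0] * 12, [5.0] * 12)
ret = minimize_plot('rosen12', de_cma(50000), rosen, bounds, num_retries=256)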
Example #5
    def add_statistics(self):
        if self.statistic_num > 0:
            si = self.si.value
            if si < self.statistic_num - 1:
                self.si.value = si + 1
            self.time[si] = dtime(self.t0)
            self.val[si] = self.best_y.value
Example #6
    def __call__(self, x):
        # determine the minimal station mass
        min_mass, slot_mass = select(self.asteroid, self.station,
                                     self.trajectory, self.mass,
                                     self.transfer_start, self.transfer_time,
                                     x)
        sdv = select_dvs(self.trajectory_dv, x)
        y = -score(min_mass, sdv)
        self.evals.value += 1
        if y < self.best_y.value:
            self.best_y.value = y
            trajectories = trajectory_selection(x, TRAJECTORY_NUM)[0]
            stations = dyson_stations(x, STATION_NUM)
            times = timings(x, STATION_NUM)

            sc = score(np.amin(slot_mass), sdv)

            logger().info(
                "evals = {0}: time = {1:.1f} s = {2:.0f} a = {3:.0f} t = {4:s} s = {5:s} b = {6:s} m = {7:s} dv = {8:s}"
                .format(self.evals.value, dtime(self.t0), sc,
                        ast_num(x, self.asteroid, self.trajectory),
                        str([round(ti, 2) for ti in times[1:-1]]),
                        str([int(si) for si in stations]),
                        str([int(ti) for ti in trajectories]),
                        str([round(mi, 2) for mi in slot_mass * 1E-15]),
                        str([round(di, 2) for di in sdv])))
        return y
Example #7
def cv_score(X):
    X = X[0]
    score = cross_val_score(
        XGBRegressor(
            colsample_bytree=X[0],
            gamma=X[1],
            min_child_weight=X[2],
            learning_rate=X[3],
            max_depth=int(X[4]),
            n_estimators=10000,
            reg_alpha=X[5],
            reg_lambda=X[6],
            subsample=X[7],
            n_jobs=1  # required for cmaes with multiple workers
        ),
        train_x,
        train_y,
        scoring='neg_mean_squared_error').mean()

    score = np.array(score)

    global f_evals
    f_evals.value += 1
    global best_f
    if best_f.value < score:
        best_f.value = score

    logger.info(
        "time = {0:.1f} y = {1:.5f} f(xmin) = {2:.5f} nfev = {3} {4}".format(
            dtime(t0), score, best_f.value, f_evals.value, X))

    return score
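The eight entries of X in cv_score map, in order, to colsample_bytree, gamma, min_child_weight, learning_rate, max_depth, reg_alpha, reg_lambda and subsample; a hedged sketch of bounds one might optimize over (the ranges are illustrative assumptions, not taken from the original):

# hedged sketch: illustrative search ranges for the eight hyperparameters
# read by cv_score, in the same order as the X indices above
from scipy.optimize import Bounds

cv_bounds = Bounds(
    [0.3, 0.0,  0.0, 0.001,  2, 0.0, 0.0, 0.5],   # lower bounds
    [1.0, 5.0, 10.0, 0.300, 10, 1.0, 2.0, 1.0])   # upper bounds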
Example #8
def test_differential_evolution(problem, num):
    best = math.inf
    t0 = time.perf_counter()
    for i in range(num):
        ret = differential_evolution(problem.fun, bounds=problem.bounds)
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}".format(
            i + 1, dtime(t0), best, ret.fun))
Example #9
def print_result(ret, best, t0, i):
    x = _feasible(ret.x)  # make sure result is _feasible / discrete
    val = ratio(x)
    if val < best:
        best = min(val, best)
    print("{0}: time = {1:.1f} best = {2:.3E} f(xmin) = {3:.3E} x = {4:s}".
          format(i, dtime(t0), best, val, str(x)))
    return best
Example #10
def test_cma_cpp(problem, num):
    best = math.inf
    t0 = time.perf_counter()
    for i in range(num):
        ret = cmaescpp.minimize(problem.fun, bounds=problem.bounds)
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}".format(
            i + 1, dtime(t0), best, ret.fun))
Example #11
def test_retry_cma_python(problem, num):
    best = math.inf
    t0 = time.perf_counter()
    for i in range(num):
        ret = retry.minimize(problem.fun, bounds=problem.bounds,
                             num_retries=2000, optimizer=Cma_python(100000))
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}"
              .format(i+1, dtime(t0), best, ret.fun))
Example #12
def test_retry(problem, num, log=None):
    best = math.inf
    t0 = time.perf_counter()
    for i in range(num):
        ret = retry.minimize(problem.fun, bounds=problem.bounds,
                             num_retries=2000, max_evaluations=100000, logger=log)
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}"
              .format(i+1, dtime(t0), best, ret.fun))
Example #13
    def fun(self, x):
        self.evals.value += 1
        y = self.f8fun(x)
        if y < self.best_y.value:
            self.best_y.value = y
            logger().info(
                str(dtime(self.t0)) + ' ' + str(self.evals.value) + ' ' +
                str(self.best_y.value) + ' ' + str(list(x)))
        return y
Example #14
def test_scipy_minimize(problem, num):
    best = math.inf
    t0 = time.perf_counter()
    for i in range(num):
        guess = random_x(problem.bounds.lb, problem.bounds.ub)
        ret = minimize(problem.fun, x0=guess, bounds=problem.bounds)
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}".format(
            i + 1, dtime(t0), best, ret.fun))
Example #15
def print_result(ret, best, t0, i):
    val = feasable_cost_penalty(ret.x)
    x = _feasible(ret.x)  # make sure result is _feasible
    if val < best:
        best = val
        print(
            "{0}: time = {1:.1f} best = {2:.8f} f(xmin) = {3:.5f} ineq = {4:.8f} x = {5:s}"
            .format(i + 1, dtime(t0), best, weight(x), penalty(x), str(x)))
    return best
Example #16
def test_cma_parallel(problem, num):
    best = math.inf
    t0 = time.perf_counter()
    for i in range(num):
        ret = cmaes.minimize(problem.fun, bounds=problem.bounds, workers=mp.cpu_count())
        if best > ret.fun or i % 100 == 99:
            print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}"
              .format(i+1, dtime(t0), best, ret.fun))
        best = min(ret.fun, best)
Example #17
def test_advretry(problem, value_limit, num):
    best = math.inf
    t0 = time.perf_counter()
    for i in range(num):
        ret = advretry.minimize(problem.fun, bounds=problem.bounds,
                                num_retries=4000, value_limit=value_limit)
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}"
              .format(i+1, dtime(t0), best, ret.fun))
Example #18
def test_cma_cpp(problem, num):
    best = math.inf
    t0 = time.perf_counter()
    for i in range(num):
        ret = cmaescpp.minimize(problem.fun, max_evaluations=100000, bounds=problem.bounds)
        if best > ret.fun or i % 100 == 99:
            print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}"
              .format(i+1, dtime(t0), best, ret.fun))
        best = min(ret.fun, best)
Example #19
def test_dual_annealing(problem, num):
    best = math.inf
    lb = problem.bounds.lb
    ub = problem.bounds.ub
    t0 = time.perf_counter()
    for i in range(num):
        ret = dual_annealing(problem.fun, bounds=list(zip(lb, ub)))
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}".format(
            i + 1, dtime(t0), best, ret.fun))
Example #20
    def dump_all(self, iter):
        idx = self.values_all().argsort()
        self.all_stats = list(np.asarray(self.all_stats)[idx])
        logger().info("iteration " + str(iter))
        for i in range(len(self.all_stats)):
            ps = self.all_stats[i]
            logger().info("problem " + ray.get(ps.name.remote())
                          + ' ' + str(ray.get(ps.id.remote()))
                          + ' ' + str(ray.get(ps.value.remote()))
                          + ' time = ' + str(dtime(self.t0)))
Example #21
def print_result(ret, best, t0, i):
    x = _feasible(ret.x)  # make sure result is _feasible
    w = weight(x)
    val = weight_penalty(x)  # add penalty for ineq constraint violation
    if val < best:
        pen = penalty(x)  # check ineq constraint
        best = min(val, best)
        print(
            "{0}: time = {1:.1f} best = {2:.8f} f(xmin) = {3:.8f} ineq = {4:.5f}"
            .format(i + 1, dtime(t0), best, w, pen))
    return best
Example #22
def test_shgo(problem, num):
    best = math.inf
    lb = problem.bounds.lb
    ub = problem.bounds.ub
    t0 = time.perf_counter()
    for i in range(num):
        ret = shgo(problem.fun,
                   bounds=list(zip(lb, ub)),
                   n=300,
                   sampling_method='sobol')
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}".format(
            i + 1, dtime(t0), best, ret.fun))
Example #23
    def dump(self):
        """logs the current status of the store if logger defined."""
        if self.logger is None:
            return
        Ys = self.get_ys()
        vals = []
        for i in range(min(20, len(Ys))):
            vals.append(round(Ys[i], 2))
        dt = dtime(self.t0)
        message = '{0} {1} {2} {3} {4:.6f} {5:.2f} {6} {7} {8!s} {9!s}'.format(
            dt, int(self.count_evals.value / dt), self.count_runs.value, self.count_evals.value,
            self.best_y.value, self.worst_y.value, self.num_stored.value, int(self.eval_fac.value),
            vals, self.best_x[:])
        self.logger.info(message)
Example #24
def test_cma_parallel_eval(dim=6):
    # parallel function evaluation using CMA-ES
    t0 = time.perf_counter()
    evals = 0
    for i in range(1000):
        ret = csmacpp.minimize(obj_f_c,
                               bounds(dim),
                               popsize=32,
                               max_evaluations=50000,
                               workers=mp.cpu_count())
        evals += ret.nfev
        print("{0}: time = {1:.1f} fun = {2:.3f} nfev = {3}".format(
            i + 1, dtime(t0), ret.fun, evals))
    return ret
Example #25
    def dump(self):
        """logs the current status of the store if logger defined."""
        if self.logger is None:
            return
        Ys = self.get_ys()
        vals = []
        for i in range(min(20, len(Ys))):
            vals.append(round(Ys[i], 2))
        dt = dtime(self.t0)

        message = '{0} {1} {2} {3} {4:.4f} {5:.2f} {6:.2f} {7!s} {8!s}'.format(
            dt, int(self.count_evals.value / dt), self.count_runs.value, self.count_evals.value,
            self.best_y.value, self.get_y_mean(), self.get_y_standard_dev(), vals, self.get_x(0))
        self.logger.info(message)
Example #26
def test_advretry_cma_python(problem, value_limit, num):
    best = math.inf
    t0 = time.perf_counter()
    for i in range(num):
        ret = advretry.minimize(problem.fun,
                                bounds=problem.bounds,
                                num_retries=4000,
                                optimizer=Cma_python(2000),
                                value_limit=value_limit,
                                statistic_num=5000,
                                logger=logger())
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}".format(
            i + 1, dtime(t0), best, ret.fun))
Example #27
def test_gclde_parallel_eval(problem):
    # parallel function evaluation using GCL_DE
    t0 = time.perf_counter()
    evals = 0
    for i in range(100000):
        ret = gcldecpp.minimize(problem.fun,
                                problem.bounds,
                                popsize=256,
                                max_evaluations=200000,
                                workers=mp.cpu_count())
        evals += ret.nfev
        print("{0}: time = {1:.1f} fun = {2:.3f} nfev = {3}".format(
            i + 1, dtime(t0), ret.fun, evals))
    return ret
Example #28
    def wrapper(self, x):
        y = self.fun(x)
        self.sevals.value += 1
        if y < self.bval.value:
            self.bval.value = y
            si = self.si.value
            if si < self.statistic_num - 1:
                self.si.value = si + 1
            self.time[si] = dtime(self.t0)
            self.val[si] = y
            if self.logger is not None:
                self.logger.info(
                    str(self.time[si]) + ' ' + str(self.sevals.value) + ' ' +
                    str(y) + ' ' + str(list(x)))
        return y
Example #29
def adv_minimize_plot(name, optimizer, fun, bounds,
                      value_limit=math.inf, num_retries=1024, logger=logger(), statistic_num=0):
    time0 = time.perf_counter()  # optimization start time
    name += '_smart_' + optimizer.name
    logger.info('smart optimize ' + name)
    store = advretry.Store(lambda x: fun(x)[0], bounds, capacity=5000, logger=logger,
                           num_retries=num_retries, statistic_num=statistic_num)
    advretry.retry(store, optimizer.minimize, value_limit)
    xs = np.array(store.get_xs())
    ys = np.array([fun(x) for x in xs])
    retry.plot(ys, '_all_' + name + '.png', interp=False)
    np.savez_compressed(name, xs=xs, ys=ys)
    xs, front = pareto(xs, ys)
    logger.info(name + ' time ' + str(dtime(time0)))
    retry.plot(front, '_front_' + name + '.png')
Example #30
def test_de_cma_parallel_retry(problem):
    # parallel optimization retry using a DE -> CMA-ES sequence
    t0 = time.perf_counter()
    evals = 0
    for i in range(1000):
        ret = retry.minimize(problem.fun,
                             problem.bounds,
                             logger=logger(),
                             optimizer=de_cma(50000),
                             value_limit=10)

        evals += ret.nfev
        print("{0}: time = {1:.1f} fun = {2:.3f} nfev = {3}".format(
            i + 1, dtime(t0), ret.fun, evals))
    return ret