    def fit(self, my_lambda, coverage=3, max_iters=50):

        #while(True):
        print('beta = ', self.my_beta)
        print('lambda = ', my_lambda)

        my_args = (self, my_lambda, coverage, False)

        nfeatures = self.train_args['feature'].shape[1]
        w0 = self.randomw0(nfeatures)

        sigma_value = 0.02
        print('sigma0 ', sigma_value)
        es = CMAEvolutionStrategy(w0,
                                  sigma0=sigma_value,
                                  inopts={
                                      'maxiter': max_iters,
                                      'popsize': 40
                                  })

        while not es.stop():
            solutions = es.ask()
            fitnesses = [
                InterestingnessLearner.cost(x, *my_args) for x in solutions
            ]
            es.tell(solutions, fitnesses)
            es.disp()

            self.nmcost(es.result[0], my_lambda, coverage, is_debug=True)

        final_model = self.createClassifier(es.result[0], coverage)

        print(final_model.size(), final_model.meanSupport())
        return final_model
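Every example in this listing follows the same ask/tell loop from the `cma` package. For reference, a minimal self-contained sketch of that loop on cma's built-in sphere test function (the option values here are illustrative only):

import cma

# Minimal ask/tell loop on cma's built-in sphere test function.
es = cma.CMAEvolutionStrategy(8 * [0.5], 0.3, {'maxiter': 50, 'popsize': 16})
while not es.stop():
    candidates = es.ask()                               # sample a new population
    fitnesses = [cma.ff.sphere(x) for x in candidates]  # evaluate each candidate
    es.tell(candidates, fitnesses)                      # update mean and covariance
    es.disp()
es.result_pretty()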
def evolve_greedy_policies(model_dist: ModelDist,
                           iterations: int = 30,
                           population_size: int = 5):
    """
    Evolves the greedy policy to find the best policies

    :param model_dist: Model distribution
    :param iterations: Number of evolutions
    :param population_size: The population size
    """
    print(f'Evolving the greedy policies for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')

    eval_tasks, eval_servers = model_dist.generate_oneshot()
    lower_bound = greedy_algorithm(eval_tasks, eval_servers, ValuePriority(),
                                   ProductResources(),
                                   SumSpeed()).social_welfare
    print(f'Lower bound is {lower_bound}')
    reset_model(eval_tasks, eval_servers)

    evolution_strategy = CMAEvolutionStrategy(
        11 * [1], 0.2, {'popsize': population_size})
    for iteration in range(iterations):
        suggestions = evolution_strategy.ask()
        tasks, servers = model_dist.generate_oneshot()

        solutions = []
        for i, suggestion in enumerate(suggestions):
            solutions.append(
                greedy_algorithm(
                    tasks, servers,
                    TaskPriorityEvoStrategy(i, *suggestion[:5]),
                    ServerSelectionEvoStrategy(i, *suggestion[5:8]),
                    ResourceAllocationEvoStrategy(
                        i, *suggestion[8:11])).social_welfare)
            reset_model(tasks, servers)

        evolution_strategy.tell(suggestions, solutions)
        evolution_strategy.disp()

        if iteration % 2 == 0:
            evaluation = greedy_algorithm(
                eval_tasks, eval_servers,
                TaskPriorityEvoStrategy(0, *suggestions[0][:5]),
                ServerSelectionEvoStrategy(0, *suggestions[0][5:8]),
                ResourceAllocationEvoStrategy(0, *suggestions[0][8:11]))
            print(f'Iter: {iteration} - {evaluation.social_welfare}')

    pprint.pprint(evolution_strategy.result())
Example No. 3
def main():
    # param length
    # 10 fights
    # rating_mu, rating_sig, wins, losses, odds
    input_params_len = 10 * 2 * 4
    es = CMAEvolutionStrategy([0] * input_params_len, 0.5)

    while not es.stop():
        solutions = es.ask()
        func_vals = [get_betting_result(x) for x in solutions]
        es.tell(solutions, func_vals)
        es.logger.add()  # write data to disc to be plotted
        es.disp()

    es.result_pretty()
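The data recorded by `es.logger.add()` above is written to disk (by default under the `outcmaes` prefix) and can be plotted after the run; a minimal sketch, assuming matplotlib is available:

import cma

# Plot the CMADataLogger output files written during the run above.
cma.plot()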
Example No. 4
def run():
    train = 0
    
    names = [
        # 'bet_pred_a', 'bet_pred_b', 'bet_odds_a', 'bet_odds_b', 'bet_wnl_a', 'bet_wnl_b',
        'bet_ts_a', 'bet_ts_b', 'bet_tmi_a', 'bet_tmi_b', 'bet_tma_a', 'bet_tma_b',
    ]
    params = [
        0, 0, 0, 0, 0, 0
    ]
    bounds = [[-np.inf],
              [np.inf]]
    assert len(params) == len(names)
    # assert len(params) == len(bounds[0])

    if train:
        sigma = 1
        opts = CMAOptions()
        # opts['tolx'] = 1E-2
        opts['bounds'] = bounds
        es = CMAEvolutionStrategy(params, sigma, inopts=opts)
        while not es.stop():
            solutions = es.ask()
            fitness = [main(x, train=1) for x in solutions]
            es.tell(solutions, fitness)
            es.disp()
            print(list(es.result[0]))
            print(list(es.result[5]))
        es.result_pretty()
        print('')
        print('best')
        print(list(es.result[0]))
        print('')
        print('xfavorite: distribution mean in "phenotype" space, to be considered as current best estimate of the optimum')
        print(list(es.result[5]))

    else:
        main(params)
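The `bounds` option used here, `[[-np.inf], [np.inf]]`, leaves the search effectively unconstrained. Per-parameter bounds can be supplied as lists instead; a small sketch with illustrative numbers and a toy quadratic objective:

import numpy as np
from cma import CMAEvolutionStrategy, CMAOptions

opts = CMAOptions()
opts['bounds'] = [[0.0, 0.0, -1.0], [1.0, 5.0, 1.0]]  # per-parameter lower and upper bounds
opts['maxiter'] = 100
es = CMAEvolutionStrategy([0.5, 2.0, 0.0], 0.5, inopts=opts)
while not es.stop():
    xs = es.ask()
    es.tell(xs, [float(np.sum(np.square(x))) for x in xs])  # toy quadratic objective
es.result_pretty()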
Example No. 5
    def train(self, cost, model, x_data, y_data=None, tolfun=1e-11, popsize=None, maxiter=None, use_grad=False):
        """Trains the ``model`` using the custom `cost` function.

        Args:
            cost (theta.costfunctions): the cost function.
            model (theta.model.Model or theta.rtbm.RTBM): the model to be trained.
            x_data (numpy.array): the support data with shape (Nv, Ndata).
            y_data (numpy.array): the target prediction.
            tolfun (float): the maximum tolerance of the cost function fluctuation to stop the minimization.
            popsize (int): the population size.
            maxiter (int): the maximum number of iterations.
            use_grad (bool): if True the gradients for the cost and model are used in the minimization.

        Returns:
            numpy.array: the optimal parameters

        Note:
            The parameters of the model are changed by this algorithm.
        """

        initsol = np.real(model.get_parameters())
        args = {'bounds': model.get_bounds(),
                'tolfun': tolfun,
                'verb_log': 0}
        sigma = np.max(model.get_bounds()[1])*0.1

        if popsize is not None:
            args['popsize'] = popsize

        if maxiter is not None:
            args['maxiter'] = maxiter

        grad = None
        if use_grad:
            grad = worker_gradient

        es = CMAEvolutionStrategy(initsol, sigma, args)
        if self.num_cores > 1:
            with closing(mp.Pool(self.num_cores, initializer=worker_initialize,
                                 initargs=(cost, model, x_data, y_data))) as pool:
                while not es.stop():
                    f_values, solutions = [], []
                    while len(solutions) < es.popsize:
                        x = es.ask(es.popsize-len(solutions), gradf=grad)
                        curr_fit = pool.map_async(worker_compute, x).get()
                        for value, solution in zip(curr_fit,x):
                            if not np.isnan(value):
                                solutions.append(solution)
                                f_values.append(value)
                    es.tell(solutions, f_values)
                    es.disp()
                pool.terminate()
        else:
            worker_initialize(cost, model, x_data, y_data)
            while not es.stop():
                f_values, solutions = [], []
                while len(solutions) < es.popsize:
                    curr_fit = x = np.NaN
                    while np.isnan(curr_fit):
                        x = es.ask(1, gradf=grad)[0]
                        curr_fit = worker_compute(x)
                    solutions.append(x)
                    f_values.append(curr_fit)
                es.tell(solutions, f_values)
                es.disp()
        print(es.result)

        model.set_parameters(es.result[0])
        return es.result[0]
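The `gradf` hook passed to `es.ask()` above lets CMA use gradient information when proposing candidates. A standalone sketch of the same mechanism on a simple quadratic (the functions below are illustrative and not part of this trainer):

import numpy as np
import cma

def sphere(x):
    return float(np.dot(x, x))

def sphere_grad(x, *args):
    # analytic gradient of the sphere function
    return 2.0 * np.asarray(x)

es = cma.CMAEvolutionStrategy(5 * [1.0], 0.5, {'maxiter': 50, 'verb_log': 0})
while not es.stop():
    xs = es.ask(gradf=sphere_grad)        # gradient information is used when sampling candidates
    es.tell(xs, [sphere(x) for x in xs])
es.disp()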
Example No. 6
def run():
    train = 0

    names = [
        # 'pred_a', 'pred_b',             # 0.0
        # 'odds_a', 'odds_b',             # 2.1
        # 'bet_wnl_a', 'bet_wnl_b',       # 2.1
        # 'bet_ts_a', 'bet_ts_b',         # 1.5
        # 'bet_tmi_a', 'bet_tmi_b',       # 1.1
        # 'bet_tma_a', 'bet_tma_b',       # 0.9
        # 'bet_drs_a', 'bet_drs_b',       # 0.5
        'bet_sfc_a',
        'bet_sfc_b',  # -0.1

        # 'bet_spd_a', 'bet_spd_b',     # 0.8
        # 'bet_set_a', 'bet_set_b',       # 1.0
        # 'bet_gms_a', 'bet_gms_b',       # -0.2
        # 'bet_tie_a', 'bet_tie_b',       # 4.1
        # 'bet_ups_a', 'bet_ups_b',       # -0.2
        # 'bet_age_a', 'bet_age_b',       # -2.5
    ]
    params = [0, 0]
    bounds = [[-np.inf], [np.inf]]
    assert len(params) == len(names)
    assert len(params) == len(bounds)

    if train:
        sigma = 1
        opts = CMAOptions()
        # opts['tolx'] = 1E-2
        opts['bounds'] = bounds
        es = CMAEvolutionStrategy(params, sigma, inopts=opts)
        while not es.stop():
            solutions = es.ask()
            try:
                fitness = [main(x, train) for x in solutions]
            except ValueError as exc:
                print(str(exc))
                continue
            es.tell(solutions, fitness)
            es.disp()
            print(list(es.result[0]))
            print(list(es.result[5]))
        es.result_pretty()
        print(
            f'finished after {es.result[3]} evaluations and {es.result[4]} iterations'
        )
        print('')
        print('best')
        print(list(es.result[0]))
        print('')
        print(
            'xfavorite: distribution mean in "phenotype" space, to be considered as current best estimate of the optimum'
        )
        print(list(es.result[5]))

        # res = minimize(main, params, (train,), bounds=bounds)
        # print('')
        # print(f'{res.nit} iterations')
        # print(f'Success: {res.success} {res.message}')
        # print(f'Solution: {res.x}')
        # return

    else:
        main(params)
Example No. 8
def run():
    train = 0

    names = [
        'bet_multi_param',
        'bet_tma_a',
        'bet_tma_b',
        'bet_lati_a',
        'bet_lati_b',
        'bet_tiew_a',
        'bet_tiew_b',

        # 'bet_upsr_a', 'bet_upsr_b',
        # 'bet_sfcw_a', 'bet_sfcw_b',
        # 'bet_wnll_a', 'bet_wnll_b',

        # 'bet_tier_a', 'bet_tier_b',
        # 'bet_upsl_a', 'bet_upsl_b',
        # 'bet_ts_a', 'bet_ts_b',

        # 'bet_wnlw_a', 'bet_wnlw_b',
        # 'bet_setw_a', 'bet_setw_b',
        # 'bet_setl_a', 'bet_setl_b',

        # 'bet_gms_a', 'bet_gms_b',
        # 'bet_drsl_a', 'bet_drsl_b',
        # 'bet_tmi_a', 'bet_tmi_b',

        # 'bet_wnlr_a', 'bet_wnlr_b',
        # 'bet_upsw_a', 'bet_upsw_b',
        # 'bet_drs_a', 'bet_drs_b',

        # 'bet_setr_a', 'bet_setr_b',
        # 'bet_drsw_a', 'bet_drsw_b',
        # 'bet_tiel_a', 'bet_tiel_b',

        # 'bet_age_a', 'bet_age_b',
        # 'bet_spd_a', 'bet_spd_b',

        # 'bet_sfcr_a', 'bet_sfcr_b',
    ]
    tolx = 10000  # the higher this value, the longer the run
    params = [-14, 0, 0, 0, 0, 0, 0]
    bounds = [[-np.inf], [np.inf]]
    assert len(params) == len(names)
    # assert len(params) == len(bounds)

    if train:
        time_start = time()
        mins = 60 * 4
        sigma = 1
        opts = CMAOptions()
        opts['bounds'] = bounds
        es = CMAEvolutionStrategy(params, sigma, inopts=opts)
        while not es.stop():
            solutions = es.ask()
            try:
                fitness = [main(x, train) for x in solutions]
            except ValueError as exc:
                print(str(exc))
                continue
            es.tell(solutions, fitness)
            es.disp()
            # print(list(es.result[0]))
            print(f'tolx={es.opts["tolx"]:.3f}  sol={list(es.result[5])}')
            es.opts['tolx'] = es.result[3] / tolx
            if time() - time_start > 60 * mins:
                print(f'{mins}min limit reached')
                break
        es.result_pretty()
        print(
            f'finished after {es.result[3]} evaluations and {es.result[4]} iterations'
        )
        # print('')
        # print('best')
        # print(list(es.result[0]))
        print('')
        print(
            'xfavorite: distribution mean in "phenotype" space, to be considered as current best estimate of the optimum'
        )
        print(list(es.result[5]))

        # pmin = -20
        # pmax = 20
        # step = (pmax - pmin) / 10
        # rranges = [
        #     slice(pmin, pmax, step),
        #     slice(pmin, pmax, step),
        # ]
        # res = optimize.brute(main, rranges, (train,), finish=None)
        # print(res)
        # return

        # res = minimize(main, params, (train,), bounds=bounds)
        # print('')
        # print(f'{res.nit} iterations')
        # print(f'Success: {res.success} {res.message}')
        # print(f'Solution: {res.x}')
        # return

    else:
        main(params)
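The manual wall-clock check above (`if time() - time_start > 60 * mins: break`) can also be expressed through cma's `timeout` option, which stops the run after a given number of seconds; a hedged sketch, since the exact option set depends on the installed cma version:

from cma import CMAEvolutionStrategy, CMAOptions

opts = CMAOptions()
opts['timeout'] = 60 * 60 * 4                 # seconds; stop after roughly four hours
es = CMAEvolutionStrategy(7 * [0.0], 1.0, inopts=opts)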
Example No. 9
class CMAOptimizationSteppable(SteppableBasePy):
    def __init__(self, _simulator, _frequency=1):
        SteppableBasePy.__init__(self, _simulator, _frequency)

        self.optim = None
        self.sim_length_mcs = self.simulator.getNumSteps() - 1
        self.f_vec = []
        self.X_vec = []
        self.X_vec_check = []
        self.num_fcn_evals = -1

    def minimized_fcn(self, *args, **kwds):
        """
        This function needs to be overloaded in the subclass - it implements the simulation fitness metric
        :return {float}: number describing the "fitness" of the simulation
        """
        return 0.0

    def initial_condition_fcn(self, *args, **kwds):
        """
        This function prepares the initial condition for the simulation. Typically it creates the cell field and
        initializes all cell and field properties
        :param args: the first argument is a vector of the parameters being optimized; the rest are up to the user
        :param kwds: keyword arguments - these are up to the user
        :return: None
        """
        pass

    def init_optimization_strategy(self, *args, **kwds):
        """
        init_optimization_strategy initializes the optimizer object. Its arguments depend on the specific optimizer used.
        In the case of the CMA optimizer the options are described here: https://pypi.python.org/pypi/cma
        :param args: see https://pypi.python.org/pypi/cma
        :param kwds: see https://pypi.python.org/pypi/cma
        :return: None
        """

        self.optim = CMAEvolutionStrategy(*args, **kwds)

    def optimization_step(self, mcs):
        """
        This function implements the housekeeping associated with running the optimization algorithm in a steppable
        :param mcs {int}: current mcs
        :return: None
        """

        if not mcs % self.sim_length_mcs:

            if self.optim.stop():
                self.stopSimulation()
                print('termination by', self.optim.stop())
                print('best f-value =', self.optim.result()[1])
                print('best solution =', self.optim.result()[0])

            if not len(self.X_vec):
                self.X_vec = self.optim.ask()

                if len(self.f_vec):
                    # print 'self.X_vec_check=', self.X_vec_check
                    # print 'self.f_vec=', self.f_vec
                    self.optim.tell(
                        self.X_vec_check,
                        self.f_vec)  # do all the real "update" work

                    self.optim.disp(20)  # display info every 20th iteration
                    self.optim.logger.add()  # log another "data line"

                    self.f_vec = []

                    self.num_fcn_evals = len(self.X_vec)

                self.X_vec_check = deepcopy(self.X_vec)

            self.X_current = self.X_vec[0]

            if len(self.X_vec_check) != self.num_fcn_evals:
                fcn_target = self.minimized_fcn()
                self.f_vec.append(fcn_target)
                self.X_vec.pop(0)

            self.num_fcn_evals -= 1

            CompuCellSetup.reset_current_step(0)
            self.simulator.setStep(0)
            self.clean_cell_field(reset_inventory=True)
            self.initial_condition_fcn(self.X_current)
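A hedged sketch of how this steppable might be specialized: `start` is the usual CompuCell3D steppable entry point, while `compute_metric` and `build_cell_field` are hypothetical helpers standing in for project-specific code.

class MyOptimizationSteppable(CMAOptimizationSteppable):
    def start(self):
        # two parameters searched in [0, 1] with initial step size 0.3
        self.init_optimization_strategy(2 * [0.5], 0.3, {'bounds': [0, 1]})

    def minimized_fcn(self, *args, **kwds):
        # e.g. squared deviation of a measured quantity from a target (25.0 is illustrative)
        return (self.compute_metric() - 25.0) ** 2

    def initial_condition_fcn(self, x, *args, **kwds):
        # rebuild the cell field from the candidate parameter vector x
        self.build_cell_field(x)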
Example No. 10
    def run_optimization(self):

        """
        Runs optimization job
        :return:
        """
        simulation_name = self.parse_args.input
        population_size = self.parse_args.population_size

        self.optim_param_mgr = OptimizationParameterManager()
        optim_param_mgr = self.optim_param_mgr

        # optim_param_mgr.parse(args.params_file)
        optim_param_mgr.parse(self.parse_args.params_file)

        starting_params = optim_param_mgr.get_starting_points()
        print('starting_params (mapped to [0,1])=', starting_params)
        print('remapped (true) starting params=', optim_param_mgr.params_from_0_1(starting_params))
        print('dictionary of remapped parameters labeled by parameter name=',
              optim_param_mgr.param_from_0_1_dict(starting_params))

        print('simulation_name=', simulation_name)
        self.workload_dict = self.prepare_optimization_run(simulation_name=simulation_name)
        workload_dict = self.workload_dict

        print(workload_dict)

        std_dev = optim_param_mgr.std_dev
        default_bounds = optim_param_mgr.default_bounds

        optim = CMAEvolutionStrategy(starting_params, std_dev, {'bounds': list(default_bounds)})

        while not optim.stop():  # iterate
            # get candidate solutions
            # param_set_list = optim.ask(number=self.num_workers)
            # param_set_list = optim.ask(number=1)
            param_set_list = optim.ask(number=population_size)

            # set param_set_list for run_task to iterate over
            self.set_param_set_list(param_set_list=param_set_list)

            # #debug
            # return_result_vec = [self.fcn(optim_param_mgr.params_from_0_1(X)) for X in param_set_list]

            # evaluate target function values at the candidate solutions
            return_result_vec = np.array([], dtype=float)
            for param_set in self.param_generator(self.num_workers):
                print('CURRENT PARAM SET=', param_set)
                # distribute param_set to workers - run_task spawns the appropriate number of workers
                # given self.num_workers and the size of the param_set
                partial_return_result_vec = self.run_task(workload_dict, param_set)

                return_result_vec = np.append(return_result_vec, partial_return_result_vec)

                print('FINISHED PARAM_SET=', param_set)

            optim.tell(param_set_list, return_result_vec)  # do all the real "update" work
            optim.disp(20)  # display info every 20th iteration
            optim.logger.add()  # log another "data line"

        optimal_parameters = optim.result()[0]

        print('termination by', optim.stop())
        print('best f-value =', optim.result()[1])
        optimal_parameters_remapped = optim_param_mgr.params_from_0_1(optim.result()[0])
        print('best solution =', optimal_parameters_remapped)

        # print('best solution =', optim_param_mgr.params_from_0_1(optim.result()[0]))

        print(optim_param_mgr.params_names)

        self.save_optimal_parameters(optimal_parameters)
        self.save_optimal_simulation(optimal_parameters)
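The candidate vectors handed to CMA here live in [0, 1] and are remapped to their true ranges by `OptimizationParameterManager`. Conceptually that remap is a simple affine transformation, illustrated by this standalone helper (not the actual manager code):

import numpy as np

def params_from_0_1(x01, lower, upper):
    # affinely remap unit-interval parameters onto their true ranges
    x01 = np.asarray(x01, dtype=float)
    return np.asarray(lower) + x01 * (np.asarray(upper) - np.asarray(lower))

# e.g. a candidate [0.25, 0.5] with true ranges [0, 10] and [-1, 1]
print(params_from_0_1([0.25, 0.5], lower=[0.0, -1.0], upper=[10.0, 1.0]))  # -> [2.5, 0.0]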
if encoder is not None:
    x_mean = encoder.predict(img[np.newaxis, ...])
fitness_func = Fitness(img, decoder)
best_img = None
best_z = None
best_score = -1
for i in range(args.runs):
    print('Runs: %d / %d' % (i + 1, args.runs))
    if encoder is None:
        init = np.random.randn(decoder.input_shape[-1]) * args.std
    else:
        init = x_mean[0]
    es = ES(init, args.sigma)
    for ite in range(args.iterations):
        dnas = np.asarray(es.ask())
        es.tell(dnas, fitness_func(dnas))
        es.disp()
    es.result_pretty()
    z = np.asarray(es.result[0])
    img_reconstruct = decoder.predict(z[np.newaxis, ...])[0]
    mse = np.mean(np.square(img_reconstruct - img))
    print('mse: {:.2f}'.format(mse))
    if mse > best_score:
        best_score = mse
        best_z = z
        best_img = img_reconstruct
    output_img = np.round(
        np.concatenate(
            (np.squeeze(img), np.squeeze(img_reconstruct)), axis=1) * 127.5 +
        127.5).astype(np.uint8)
    filename, ext = os.path.splitext(args.output)
    imsave(filename + '_%d' % i + ext, output_img)