def main(filename, directory):
    """Run a CMA-ES optimization over the parameters found in a JSON config.

    :param filename: path to the initial JSON parameter file
    :param directory: output directory; receives "initial.json", a "gens/"
        subdirectory with one folder per generation, and "final.json"
    """
    initial_path = os.path.join(directory, "initial.json")
    gens_path = os.path.join(directory, "gens")
    final_path = os.path.join(directory, "final.json")

    # The condor submit template is read once and reused for every generation.
    with open("base.condor", "r") as fh:
        condor_template = fh.read()

    optMkdir(directory)
    optMkdir(gens_path)
    # Keep a pristine copy of the starting configuration alongside the run.
    shutil.copyfile(filename, initial_path)
    with open(filename, "r") as fh:
        config = json.load(fh)

    names, values = [], []
    flattenParams(config, names, values, "")

    options = Options()
    options["popsize"] = POP_SIZE
    # opts.printme()
    strategy = CMAEvolutionStrategy(values, SIGMA0, options)

    # Standard ask/evaluate/tell loop, capped at NUM_GENS generations.
    while strategy.countiter < NUM_GENS and not strategy.stop():
        gen_dir = os.path.join(gens_path, str(strategy.countiter))
        optMkdir(gen_dir)
        candidates = strategy.ask()
        candidates, fitnesses = runEvals(names, candidates, strategy.countiter,
                                         gen_dir, condor_template)
        strategy.tell(candidates, fitnesses)

    best = strategy.result()
    paramsToJsonFile(final_path, names, best[0])
# --- Example 2 ---
def main(filename, directory):
    """Drive a CMA-ES search over the parameters stored in a JSON file.

    :param filename: path of the starting JSON configuration
    :param directory: run directory; receives 'initial.json', 'gens/' and
        'final.json'
    """
    # Resolve all output locations up front.
    initialFilename = os.path.join(directory, 'initial.json')
    genDir = os.path.join(directory, 'gens')
    finalFilename = os.path.join(directory, 'final.json')

    # Load the condor submission template used by every evaluation batch.
    with open('base.condor', 'r') as template_file:
        condorConfig = template_file.read()

    optMkdir(directory)
    optMkdir(genDir)
    shutil.copyfile(filename, initialFilename)

    with open(filename, 'r') as config_file:
        conf = json.load(config_file)

    paramNames, paramVals = [], []
    flattenParams(conf, paramNames, paramVals, '')

    opts = Options()
    opts['popsize'] = POP_SIZE
    cma = CMAEvolutionStrategy(paramVals, SIGMA0, opts)

    while True:
        # Stop after NUM_GENS generations or when CMA-ES signals convergence.
        if cma.countiter >= NUM_GENS or cma.stop():
            break
        thisGenDir = os.path.join(genDir, str(cma.countiter))
        optMkdir(thisGenDir)
        candidates = cma.ask()
        candidates, fits = runEvals(paramNames, candidates, cma.countiter,
                                    thisGenDir, condorConfig)
        cma.tell(candidates, fits)

    paramsToJsonFile(finalFilename, paramNames, cma.result()[0])
def evolve_greedy_policies(model_dist: ModelDist,
                           iterations: int = 30,
                           population_size: int = 5):
    """
    Evolves the greedy policy to find the best policies

    :param model_dist: Model distribution
    :param iterations: Number of evolutions
    :param population_size: The population size
    """
    print(f'Evolves the greedy policies for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')

    # Baseline: social welfare of a fixed hand-picked greedy policy on a
    # held-out evaluation instance.
    eval_tasks, eval_servers = model_dist.generate_oneshot()
    lower_bound = greedy_algorithm(eval_tasks, eval_servers, ValuePriority(),
                                   ProductResources(),
                                   SumSpeed()).social_welfare
    print(f'Lower bound is {lower_bound}')
    reset_model(eval_tasks, eval_servers)

    # BUG FIX: the cma option key is 'popsize'; the previous key
    # 'population size' is not a recognised option, so the requested
    # population size was never applied.
    # NOTE(review): cma minimizes its objective, while social welfare looks
    # like something to maximize -- confirm whether the fitnesses fed to
    # tell() should be negated.
    evolution_strategy = CMAEvolutionStrategy(
        11 * [1], 0.2, {'popsize': population_size})
    for iteration in range(iterations):
        suggestions = evolution_strategy.ask()
        # Fresh problem instance each generation.
        tasks, servers = model_dist.generate_oneshot()

        solutions = []
        for i, suggestion in enumerate(suggestions):
            # Each 11-dim suggestion is split into the three policy weight
            # groups: [0:5] task priority, [5:8] server selection,
            # [8:11] resource allocation.
            solutions.append(
                greedy_algorithm(
                    tasks, servers,
                    TaskPriorityEvoStrategy(i, *suggestion[:5]),
                    ServerSelectionEvoStrategy(i, *suggestion[5:8]),
                    ResourceAllocationEvoStrategy(
                        i, *suggestion[8:11])).social_welfare)
            reset_model(tasks, servers)

        evolution_strategy.tell(suggestions, solutions)
        evolution_strategy.disp()

        # Every other iteration, score the first suggestion on the held-out
        # evaluation instance for progress reporting.
        if iteration % 2 == 0:
            evaluation = greedy_algorithm(
                eval_tasks, eval_servers,
                TaskPriorityEvoStrategy(0, *suggestions[0][:5]),
                ServerSelectionEvoStrategy(0, *suggestions[0][5:8]),
                ResourceAllocationEvoStrategy(0, *suggestions[0][8:11]))
            print(f'Iter: {iteration} - {evaluation.social_welfare}')

    pprint.pprint(evolution_strategy.result())
# --- Example 4 ---
    def update_borders(self):
        """Refine each cluster's border with a CMA-ES search.

        For every cluster, the flattened border vertices are optimized within
        the global boundary bounds; after each CMA-ES generation the best
        border found so far replaces the cluster's current one.
        """
        for i in range(len(self.clusters)):
            cluster = self.clusters[i]
            # Initial step size is scaled by the border's extent.
            # BUG FIX: was norm(border[1] - border[1]), which is always zero
            # and therefore gave CMA-ES a degenerate sigma0 of 0.
            len_bounds = np.linalg.norm(cluster.border[1] - cluster.border[0])
            es = CMAEvolutionStrategy(
                cluster.border.ravel().tolist(), len_bounds * 0.1, {
                    'bounds':
                    [self.boundary.min_bounds[0], self.boundary.max_bounds[0]]
                })
            while not es.stop():
                solutions = es.ask()
                es.tell(
                    solutions,
                    [self.evaluate_border(border, i) for border in solutions])

                # Adopt the best-so-far solution as the cluster's new border.
                x_best = es.result()[0]
                if x_best is not None:
                    cluster.border = x_best.reshape(cluster.border.shape)
    def run_optimization(self):
        """
        Runs a complete CMA-ES optimization job: parses the parameter file,
        prepares the simulation workload, then iterates ask/evaluate/tell
        until the optimizer signals termination.
        :return: None (results are persisted via save_optimal_parameters /
            save_optimal_simulation)
        """
        simulation_name = self.parse_args.input
        population_size = self.parse_args.population_size

        self.optim_param_mgr = OptimizationParameterManager()
        optim_param_mgr = self.optim_param_mgr

        # optim_param_mgr.parse(args.params_file)
        optim_param_mgr.parse(self.parse_args.params_file)

        # Parameters are handled in normalized [0,1] space; params_from_0_1
        # remaps them to their true ranges for display and evaluation.
        starting_params = optim_param_mgr.get_starting_points()
        print 'starting_params (mapped to [0,1])=', starting_params
        print 'remapped (true) starting params=', optim_param_mgr.params_from_0_1(
            starting_params)
        print 'dictionary of remapped parameters labeled by parameter name=', optim_param_mgr.param_from_0_1_dict(
            starting_params)

        print 'simulation_name=', simulation_name
        self.workload_dict = self.prepare_optimization_run(
            simulation_name=simulation_name)
        workload_dict = self.workload_dict

        print workload_dict

        std_dev = optim_param_mgr.std_dev
        default_bounds = optim_param_mgr.default_bounds

        optim = CMAEvolutionStrategy(starting_params, std_dev,
                                     {'bounds': list(default_bounds)})

        while not optim.stop():  # iterate
            # get candidate solutions
            # param_set_list = optim.ask(number=self.num_workers)
            # param_set_list = optim.ask(number=1)
            param_set_list = optim.ask(number=population_size)

            # set param_set_list for run_task to iterate over
            self.set_param_set_list(param_set_list=param_set_list)

            # #debug
            # return_result_vec = [self.fcn(optim_param_mgr.params_from_0_1(X)) for X in param_set_list]

            # evaluate  targert function values at the candidate solutions
            return_result_vec = np.array([], dtype=float)
            for param_set in self.param_generator(self.num_workers):
                print 'CURRENT PARAM SET=', param_set
                # distribution param_set to workers - run tasks spawns appropriate number of workers
                # given self.num_workers and the size of the param_set
                partial_return_result_vec = self.run_task(
                    workload_dict, param_set)

                return_result_vec = np.append(return_result_vec,
                                              partial_return_result_vec)

                print 'FINISHED PARAM_SET=', param_set

            optim.tell(param_set_list,
                       return_result_vec)  # do all the real "update" work
            optim.disp(20)  # display info every 20th iteration
            optim.logger.add()  # log another "data line"

        optimal_parameters = optim.result()[0]

        # NOTE(review): optimal_parameters stays in [0,1] space; it is
        # remapped below only for display, while the saved parameters remain
        # normalized -- confirm this is intended.
        print('termination by', optim.stop())
        print('best f-value =', optim.result()[1])
        optimal_parameters_remapped = optim_param_mgr.params_from_0_1(
            optim.result()[0])
        print('best solution =', optimal_parameters_remapped)

        # print('best solution =', optim_param_mgr.params_from_0_1(optim.result()[0]))

        print optim_param_mgr.params_names

        self.save_optimal_parameters(optimal_parameters)
        self.save_optimal_simulation(optimal_parameters)
# --- Example 6 ---
class CMAOptimizationSteppable(SteppableBasePy):
    """Steppable that embeds a CMA-ES optimization loop into a simulation.

    Each simulated run is one evaluation of the objective: at the end of a
    run the fitness is recorded via minimized_fcn(), the simulation is reset,
    and the next candidate parameter vector is applied through
    initial_condition_fcn().
    """

    def __init__(self, _simulator, _frequency=1):
        SteppableBasePy.__init__(self, _simulator, _frequency)

        self.optim = None  # CMA-ES optimizer; set by init_optimization_strategy
        self.sim_length_mcs = self.simulator.getNumSteps() - 1  # MCS per evaluation
        self.f_vec = []  # fitness values collected for the current generation
        self.X_vec = []  # candidates of the current generation still to evaluate
        self.X_vec_check = []  # untouched copy of the generation, needed by tell()
        self.num_fcn_evals = -1  # evaluations remaining in the current generation

    def minimized_fcn(self, *args, **kwds):
        """
        this function needs to be overloaded in the subclass - it implements simulation fitness metric
        :return {float}: number describing the "fitness" of the simulation
        """
        return 0.0

    def initial_condition_fcn(self, *args, **kwds):
        """
        This function prepares initial condition for the simulation. Typically it creates cell field and initializes
        all cell and field properties
        :param args: first argument is a vector of parameters that are being optimized. The rest are up to the user
        :param kwds: key words arguments - those are up to the user
        :return: None
        """
        pass

    def init_optimization_strategy(self, *args, **kwds):
        """
        init_optimization_strategy initializes optimizer object. Its arguments depend on the specific initializer used.
        In the case of the CMA optimizer the options are described here: https://pypi.python.org/pypi/cma
        :param args: see https://pypi.python.org/pypi/cma
        :param kwds: see https://pypi.python.org/pypi/cma
        :return: None
        """

        self.optim = CMAEvolutionStrategy(*args, **kwds)

    def optimization_step(self, mcs):
        """
        This function implements the housekeeping associated with running the
        optimization algorithm in a steppable: collects fitnesses, feeds
        complete generations back to the optimizer, and resets the simulation
        for the next candidate.
        :param mcs {int}: current mcs
        :return: None
        """

        # Triggered whenever mcs is a multiple of sim_length_mcs (incl. MCS 0).
        if not mcs % self.sim_length_mcs:

            if self.optim.stop():
                self.stopSimulation()
                print('termination by', self.optim.stop())
                print('best f-value =', self.optim.result()[1])
                print('best solution =', self.optim.result()[0])

            # Current generation exhausted: ask for a new one and report the
            # previous generation's fitnesses first.
            if not len(self.X_vec):
                self.X_vec = self.optim.ask()

                if len(self.f_vec):
                    # print 'self.X_vec_check=', self.X_vec_check
                    # print 'self.f_vec=', self.f_vec
                    self.optim.tell(
                        self.X_vec_check,
                        self.f_vec)  # do all the real "update" work

                    self.optim.disp(20)  # display info every 20th iteration
                    self.optim.logger.add()  # log another "data line"

                    self.f_vec = []

                    self.num_fcn_evals = len(self.X_vec)

                # X_vec is consumed as candidates are evaluated; tell() needs
                # the full generation, so keep an untouched copy.
                self.X_vec_check = deepcopy(self.X_vec)

            self.X_current = self.X_vec[0]

            # NOTE(review): this guard appears to skip recording a fitness on
            # the first pass of a generation (before a run has completed for
            # the candidate) -- confirm against the scheduling of calls.
            if len(self.X_vec_check) != self.num_fcn_evals:
                fcn_target = self.minimized_fcn()
                self.f_vec.append(fcn_target)
                self.X_vec.pop(0)

            self.num_fcn_evals -= 1

            # Rewind the simulation so the next candidate starts from scratch.
            CompuCellSetup.reset_current_step(0)
            self.simulator.setStep(0)
            self.clean_cell_field(reset_inventory=True)
            self.initial_condition_fcn(self.X_current)
# --- Example 7 ---
    def run_optimization(self):

        """
        Runs a complete CMA-ES optimization job: parses the parameter file,
        prepares the simulation workload, then iterates ask/evaluate/tell
        until the optimizer signals termination.
        :return: None (results are persisted via save_optimal_parameters /
            save_optimal_simulation)
        """
        simulation_name = self.parse_args.input
        population_size = self.parse_args.population_size

        self.optim_param_mgr = OptimizationParameterManager()
        optim_param_mgr = self.optim_param_mgr

        # optim_param_mgr.parse(args.params_file)
        optim_param_mgr.parse(self.parse_args.params_file)

        # Parameters are handled in normalized [0,1] space; params_from_0_1
        # remaps them to their true ranges for display and evaluation.
        starting_params = optim_param_mgr.get_starting_points()
        print 'starting_params (mapped to [0,1])=', starting_params
        print 'remapped (true) starting params=', optim_param_mgr.params_from_0_1(starting_params)
        print 'dictionary of remapped parameters labeled by parameter name=', optim_param_mgr.param_from_0_1_dict(
            starting_params)

        print 'simulation_name=', simulation_name
        self.workload_dict = self.prepare_optimization_run(simulation_name=simulation_name)
        workload_dict = self.workload_dict

        print workload_dict

        std_dev = optim_param_mgr.std_dev
        default_bounds = optim_param_mgr.default_bounds

        optim = CMAEvolutionStrategy(starting_params, std_dev, {'bounds': list(default_bounds)})

        while not optim.stop():  # iterate
            # get candidate solutions
            # param_set_list = optim.ask(number=self.num_workers)
            # param_set_list = optim.ask(number=1)
            param_set_list = optim.ask(number=population_size)

            # set param_set_list for run_task to iterate over
            self.set_param_set_list(param_set_list=param_set_list)

            # #debug
            # return_result_vec = [self.fcn(optim_param_mgr.params_from_0_1(X)) for X in param_set_list]

            # evaluate  targert function values at the candidate solutions
            return_result_vec = np.array([], dtype=float)
            for param_set in self.param_generator(self.num_workers):
                print 'CURRENT PARAM SET=', param_set
                # distribution param_set to workers - run tasks spawns appropriate number of workers
                # given self.num_workers and the size of the param_set
                partial_return_result_vec = self.run_task(workload_dict, param_set)

                return_result_vec = np.append(return_result_vec, partial_return_result_vec)

                print 'FINISHED PARAM_SET=', param_set


            optim.tell(param_set_list, return_result_vec)  # do all the real "update" work
            optim.disp(20)  # display info every 20th iteration
            optim.logger.add()  # log another "data line"

        optimal_parameters = optim.result()[0]

        # NOTE(review): optimal_parameters stays in [0,1] space; it is
        # remapped below only for display, while the saved parameters remain
        # normalized -- confirm this is intended.
        print('termination by', optim.stop())
        print('best f-value =', optim.result()[1])
        optimal_parameters_remapped = optim_param_mgr.params_from_0_1(optim.result()[0])
        print('best solution =', optimal_parameters_remapped)

        # print('best solution =', optim_param_mgr.params_from_0_1(optim.result()[0]))

        print optim_param_mgr.params_names

        self.save_optimal_parameters(optimal_parameters)
        self.save_optimal_simulation(optimal_parameters)
class CMAOptimizationSteppable(SteppableBasePy):
    """Steppable that embeds a CMA-ES optimization loop into a simulation.

    Each simulated run is one evaluation of the objective: at the end of a
    run the fitness is recorded via minimized_fcn(), the simulation is reset,
    and the next candidate parameter vector is applied through
    initial_condition_fcn().
    """

    def __init__(self, _simulator, _frequency=1):
        SteppableBasePy.__init__(self, _simulator, _frequency)

        self.optim = None  # CMA-ES optimizer; set by init_optimization_strategy
        self.sim_length_mcs = self.simulator.getNumSteps() - 1  # MCS per evaluation
        self.f_vec = []  # fitness values collected for the current generation
        self.X_vec = []  # candidates of the current generation still to evaluate
        self.X_vec_check = []  # untouched copy of the generation, needed by tell()
        self.num_fcn_evals = -1  # evaluations remaining in the current generation

    def minimized_fcn(self, *args, **kwds):
        """
        this function needs to be overloaded in the subclass - it implements simulation fitness metric
        :return {float}: number describing the "fitness" of the simulation
        """
        return 0.0

    def initial_condition_fcn(self, *args, **kwds):
        """
        This function prepares initial condition for the simulation. Typically it creates cell field and initializes
        all cell and field properties
        :param args: first argument is a vector of parameters that are being optimized. The rest are up to the user
        :param kwds: key words arguments - those are up to the user
        :return: None
        """
        pass

    def init_optimization_strategy(self, *args, **kwds):
        """
        init_optimization_strategy initializes optimizer object. Its arguments depend on the specific initializer used.
        In the case of the CMA optimizer the options are described here: https://pypi.python.org/pypi/cma
        :param args: see https://pypi.python.org/pypi/cma
        :param kwds: see https://pypi.python.org/pypi/cma
        :return: None
        """

        self.optim = CMAEvolutionStrategy(*args, **kwds)

    def optimization_step(self, mcs):
        """
        This function implements the housekeeping associated with running the
        optimization algorithm in a steppable: collects fitnesses, feeds
        complete generations back to the optimizer, and resets the simulation
        for the next candidate.
        :param mcs {int}: current mcs
        :return: None
        """

        # Triggered whenever mcs is a multiple of sim_length_mcs (incl. MCS 0).
        if not mcs % self.sim_length_mcs:

            if self.optim.stop():
                self.stopSimulation()
                print('termination by', self.optim.stop())
                print('best f-value =', self.optim.result()[1])
                print('best solution =', self.optim.result()[0])

            # Current generation exhausted: ask for a new one and report the
            # previous generation's fitnesses first.
            if not len(self.X_vec):
                self.X_vec = self.optim.ask()

                if len(self.f_vec):
                    # print 'self.X_vec_check=', self.X_vec_check
                    # print 'self.f_vec=', self.f_vec
                    self.optim.tell(self.X_vec_check, self.f_vec)  # do all the real "update" work

                    self.optim.disp(20)  # display info every 20th iteration
                    self.optim.logger.add()  # log another "data line"

                    self.f_vec = []

                    self.num_fcn_evals = len(self.X_vec)

                # X_vec is consumed as candidates are evaluated; tell() needs
                # the full generation, so keep an untouched copy.
                self.X_vec_check = deepcopy(self.X_vec)

            self.X_current = self.X_vec[0]

            # NOTE(review): this guard appears to skip recording a fitness on
            # the first pass of a generation (before a run has completed for
            # the candidate) -- confirm against the scheduling of calls.
            if len(self.X_vec_check) != self.num_fcn_evals:
                fcn_target = self.minimized_fcn()
                self.f_vec.append(fcn_target)
                self.X_vec.pop(0)

            self.num_fcn_evals -= 1

            # Rewind the simulation so the next candidate starts from scratch.
            CompuCellSetup.reset_current_step(0)
            self.simulator.setStep(0)
            self.clean_cell_field(reset_inventory=True)
            self.initial_condition_fcn(self.X_current)