Example #1
def main():
    # get config vars
    config = cli.init()
    population_size_nsgaii = config['POPULATION_SIZE_NSGAII']
    number_of_runs_nsgaii = config['NUMBER_OF_RUNS_NSGAII']
    number_of_runs_ga = config['NUMBER_OF_RUNS_GA']
    population_size_ga = config['POPULATION_SIZE_GA']
    config_path = config['TEST_DATA_PATH']
    ga_weights = config['GA_WEIGHTS']
    budget_constraint = config['BUDGET_CONSTRAINT']
    # parse and get specific data
    data = test_data.parse(config_path)
    requirements = data[0]
    clients = data[1]
    # run NSGA-II multi-objective algorithm
    print(datetime.datetime.now())
    print('Running NSGA-II...')
    NRP_multi = NRP_MOO(requirements, clients, budget_constraint)
    algorithm = NSGAII(NRP_multi.generate_problem(),
                       population_size=population_size_nsgaii)
    algorithm.run(number_of_runs_nsgaii)
    NSGAII_solutions = unique(nondominated(algorithm.result))
    # run GA single-objective algorithm with different weights
    GA_solutions = []
    for ga_weight in ga_weights:
        print(datetime.datetime.now())
        print('Running GA for weights ' + str(ga_weight) + ' and ' +
              str(1 - ga_weight) + '...')
        NRP_single = NRP_SOO(requirements, clients, budget_constraint,
                             ga_weight, 1 - ga_weight)
        algorithm = GeneticAlgorithm(NRP_single.generate_problem(),
                                     population_size=population_size_ga)
        algorithm.run(number_of_runs_ga)
        GA_solutions.extend(unique(nondominated(algorithm.result)))
    # run random algorithm
    print(datetime.datetime.now())
    print('Generating random solution...')
    NRP_random = NRP_Random(requirements, clients, budget_constraint)
    random_solutions = NRP_random.generate_solutions()
    print('done!')
    # draw graphs
    results.draw_graphs([
        results.get_graph_data_nsga_ii(NSGAII_solutions),
        results.get_graph_data_ga(GA_solutions, requirements, clients,
                                  budget_constraint),
        results.get_graph_data_ga(random_solutions, requirements, clients,
                                  budget_constraint)
    ])
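A minimal sketch of the config mapping returned by cli.init() that this script expects. The keys come from the code above; the values are purely illustrative assumptions.

# Hypothetical config; only the key names are taken from the code above.
config = {
    'POPULATION_SIZE_NSGAII': 100,      # NSGA-II population size
    'NUMBER_OF_RUNS_NSGAII': 10000,     # evaluation budget passed to algorithm.run()
    'POPULATION_SIZE_GA': 100,          # GA population size
    'NUMBER_OF_RUNS_GA': 10000,         # evaluation budget for each GA run
    'TEST_DATA_PATH': 'data/nrp1.txt',  # path to the NRP test-data file
    'GA_WEIGHTS': [0.3, 0.5, 0.7],      # score weights; the cost weight is 1 - w
    'BUDGET_CONSTRAINT': 0.5,           # budget constraint for the NRP instance
}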
Example #2
def to_dataframe(optimizer, dvnames, outcome_names):
    '''helper function to turn results of optimization into a pandas DataFrame
    
    Parameters
    ----------
    optimizer : platypus algorithm instance
    dvnames : list of str
    outcome_names : list of str
    
    Returns
    -------
    pandas DataFrame
    
    
    '''
    
    solutions = []
    for solution in platypus.unique(platypus.nondominated(optimizer.result)):
        decision_vars = dict(zip(dvnames, solution.variables))
        decision_out = dict(zip(outcome_names, solution.objectives))
        
        result = decision_vars.copy()
        result.update(decision_out)
        
        solutions.append(result)

    results = pd.DataFrame(solutions, columns=dvnames+outcome_names)
    return results
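A small usage sketch for the helper above, assuming platypus's built-in DTLZ2 test problem; the variable and outcome names are illustrative.

import platypus

# DTLZ2 with two objectives has eleven real-valued decision variables by default.
problem = platypus.DTLZ2()
algorithm = platypus.NSGAII(problem)
algorithm.run(10000)

dvnames = [f'x{i}' for i in range(problem.nvars)]
outcome_names = ['f1', 'f2']
df = to_dataframe(algorithm, dvnames, outcome_names)
print(df.head())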
Example #3
def solutions_to_df(solutions: List[platypus.Solution], problem, parts='all', flag_optimal=True) -> pd.DataFrame:
    """Converts a list of platypus solutions to a DataFrame, with one row corresponding to each solution

    :param solutions: list of solutions to convert
    :param problem: the problem instance, used to derive the DataFrame column names
    :param parts: which parts of the solutions should be kept
    :param flag_optimal: whether to include a boolean column denoting whether each solution is pareto-optimal
    :return: a DataFrame
    """

    def to_col_vals(solution_list):
        return list(zip(*(solution_to_values(solution, parts) for solution in solution_list)))

    solutions = platypus.unique(solutions)
    non_dominated = platypus.nondominated(solutions)
    columns = problem.names(parts)
    values, non_dom_vals = to_col_vals(solutions), to_col_vals(non_dominated)
    assert len(columns) == len(values), f'{len(values)} values does not match {len(columns)} columns'
    # TODO: Intuit the dataframe column types based on the types of the parameters of the problem
    # or use the to_df method of the problem object
    solution_df = pd.DataFrame({column: data for column, data in zip(columns, values)})  # , dtype=float
    if flag_optimal:
        non_dom_df = pd.DataFrame({column: data for column, data in zip(columns, non_dom_vals)})  # , dtype=float
        df = pd.merge(solution_df, non_dom_df, how='outer', indicator='pareto-optimal')
        df['pareto-optimal'] = df['pareto-optimal'] == 'both'
        return df
    return solution_df
Example #4
    def __call__(self, optimizer):
        n_solutions = 0
        if platypus is None:
            raise ModuleNotFoundError("platypus")
        for _ in platypus.unique(platypus.nondominated(optimizer.result)):
            n_solutions += 1
        self.results.append(n_solutions)
        super().__call__(optimizer)
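The method above belongs to a larger callback class; a self-contained sketch of how such a counter could look (class and attribute names are assumptions, not the original class).

import platypus

class NondominatedCounter:
    # Hypothetical standalone version: records how many unique non-dominated
    # solutions exist each time the optimizer invokes the callback.
    def __init__(self):
        self.results = []

    def __call__(self, optimizer):
        count = len(platypus.unique(platypus.nondominated(optimizer.result)))
        self.results.append(count)

# Usage sketch: platypus algorithms accept a callback keyword in run(), e.g.
# counter = NondominatedCounter()
# algorithm.run(10000, callback=counter)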
Example #5
def to_dataframe(optimizer, dvnames, outcome_names):
    '''helper function to turn results of optimization into a pandas DataFrame

    Parameters
    ----------
    optimizer : platypus algorithm instance
    dvnames : list of str
    outcome_names : list of str

    Returns
    -------
    pandas DataFrame

    '''

    solutions = []
    for solution in platypus.unique(platypus.nondominated(optimizer.result)):
        vars = transform_variables(solution.problem, # @ReservedAssignment
                                   solution.variables)  
        
        decision_vars = dict(zip(dvnames, vars))
        decision_out = dict(zip(outcome_names, solution.objectives))

        result = decision_vars.copy()
        result.update(decision_out)

        solutions.append(result)

    results = pd.DataFrame(solutions, columns=dvnames+outcome_names)
    return results
Example #6
def optimize(model,
             algorithm="NSGAII",
             NFE=10000,
             module="platypus",
             progress_bar=None,
             **kwargs):
    module = __import__(module, fromlist=[''])
    class_ref = getattr(module, algorithm)

    args = kwargs.copy()
    args["problem"], levers = _to_problem(model)

    instance = class_ref(**args)
    if progress_bar is not None:
        pbar = progress_bar(total=NFE)
        callback = lambda x: pbar.update(x.nfe - pbar.n)
    else:
        callback = None
    instance.run(NFE, callback=callback)

    result = DataSet()

    print("here")

    for solution in unique(nondominated(instance.result)):
        if not solution.feasible:
            continue

        env = OrderedDict()
        offset = 0

        # decode from Platypus' internal representation (this should be fixed in Platypus instead)
        vars = [
            solution.problem.types[i].decode(solution.variables[i])
            for i in range(solution.problem.nvars)
        ]

        for lever, length in levers:
            env[lever.name] = lever.from_variables(vars[offset:(offset +
                                                                length)])
            offset += length

        if any([
                r.dir not in [Response.MINIMIZE, Response.MAXIMIZE]
                for r in model.responses
        ]):
            # if there are any responses not included in the optimization, we must
            # re-evaluate the model to get all responses
            print("reeval")
            env = evaluate(model, env)
        else:
            for i, response in enumerate(model.responses):
                env[response.name] = solution.objectives[i]

        result.append(env)

    return result
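The progress_bar parameter above is expected to behave like tqdm: it is constructed with total=NFE and driven through pbar.update() and pbar.n. A hedged usage sketch, assuming tqdm is installed and that model is already defined elsewhere.

from tqdm import tqdm

# Hypothetical call; `model` must be the library's Model object, built elsewhere.
results = optimize(model, algorithm="NSGAII", NFE=10000, progress_bar=tqdm)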
Example #7
    def _prune(self):
        problem = Problem(len(self.ensemble_), 2)
        problem.types[:] = Integer(0, 1)
        problem.directions[0] = Problem.MAXIMIZE
        problem.directions[1] = Problem.MAXIMIZE
        problem.function = functools.partial(MCE._evaluate_imbalance,
                                             y_predicts=self._y_predict,
                                             y_true=self._y_valid)

        algorithm = NSGAII(problem)
        algorithm.run(10000)

        solutions = unique(nondominated(algorithm.result))
        objectives = [sol.objectives for sol in solutions]

        def extract_variables(variables):
            extracted = [v[0] for v in variables]
            return extracted

        self._ensemble_quality = self.get_group(
            extract_variables(solutions[objectives.index(
                max(objectives, key=itemgetter(0)))].variables),
            self.ensemble_)
        self._ensemble_diversity = self.get_group(
            extract_variables(solutions[objectives.index(
                max(objectives, key=itemgetter(1)))].variables),
            self.ensemble_)
        self._ensemble_balanced = self.get_group(
            extract_variables(solutions[objectives.index(
                min(objectives, key=lambda i: abs(i[0] - i[1])))].variables),
            self.ensemble_)

        pareto_set, fitnesses = self._genetic_optimalisation(
            optimalisation_type='quality_single')
        self._ensemble_quality_single = self.get_group(
            pareto_set[fitnesses.index(max(fitnesses, key=itemgetter(0)))],
            self.ensemble_)
        # pareto_set, fitnesses = self._genetic_optimalisation(optimalisation_type='diversity_single')
        # self._ensemble_diversity_single = self.get_group(pareto_set[fitnesses.index(min(fitnesses, key=itemgetter(0)))],
        #                                                  self.ensemble_)

        pareto_set, fitnesses = self._genetic_optimalisation(
            optimalisation_type='precision_single')
        self._ensemble_precision_single = self.get_group(
            pareto_set[fitnesses.index(max(fitnesses, key=itemgetter(0)))],
            self.ensemble_)
        pareto_set, fitnesses = self._genetic_optimalisation(
            optimalisation_type='recall_single')
        self._ensemble_recall_single = self.get_group(
            pareto_set[fitnesses.index(max(fitnesses, key=itemgetter(0)))],
            self.ensemble_)
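extract_variables above reads v[0] because Integer(0, 1) is stored as a single-bit encoding; the decode idiom used in the optimize and robust_optimize examples (solution.problem.types[i].decode(...)) recovers integers of any range. A minimal self-contained sketch:

from platypus import NSGAII, Problem, Integer, nondominated, unique

# Each variable is an integer in [0, 10]; platypus stores it internally as a
# bit list, so result variables must be decoded through the type object.
problem = Problem(3, 1)
problem.types[:] = Integer(0, 10)
problem.function = lambda x: [sum(x)]  # platypus passes decoded ints to the function

algorithm = NSGAII(problem)
algorithm.run(1000)

for solution in unique(nondominated(algorithm.result)):
    decoded = [problem.types[i].decode(v)
               for i, v in enumerate(solution.variables)]
    print(decoded, solution.objectives)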
Example #8
    def determine(self, runs=10000):

        # Open question: running with a process pool raised
        # PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup
        # with ProcessPoolEvaluator() as evaluator:
        #     algorithm = GeneticAlgorithm(self, evaluator=evaluator)
        #     algorithm.run(runs)

        algorithm = GeneticAlgorithm(self)
        logger.debug('trigger GEA optimization run')
        algorithm.run(runs)
        logger.debug('GEA done')

        return unique(nondominated(algorithm.result))
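On the pickling issue noted in the comment above: a common workaround is to keep the evaluation function at module level so it can be pickled, and pass the evaluator to the algorithm. A hedged sketch with a toy objective (not this class's problem):

from platypus import GeneticAlgorithm, Problem, Real, ProcessPoolEvaluator

def evaluate(x):  # module-level, therefore picklable
    return [sum(v * v for v in x)]

problem = Problem(2, 1)
problem.types[:] = Real(-10, 10)
problem.function = evaluate

if __name__ == "__main__":
    with ProcessPoolEvaluator(4) as evaluator:
        algorithm = GeneticAlgorithm(problem, evaluator=evaluator)
        algorithm.run(10000)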
Example #9
def robust_optimize(model, SOWs, algorithm="NSGAII", NFE=10000, obj_aggregate=None, constr_aggregate=None, **kwargs):
    module = __import__("platypus", fromlist=[""])
    class_ref = getattr(module, algorithm)

    if obj_aggregate is None:
        from .robustness import mean

        obj_aggregate = mean

    if constr_aggregate is None:
        constr_aggregate = max

    args = kwargs.copy()
    args["problem"] = _to_robust_problem(model, SOWs, obj_aggregate, constr_aggregate)

    instance = class_ref(**args)
    instance.run(NFE)

    result = DataSet()

    for solution in unique(nondominated(instance.result)):
        if not solution.feasible:
            continue

        env = OrderedDict()
        offset = 0

        # decode from Platypus' internal representation (this should be fixed in Platypus instead)
        vars = [solution.problem.types[i].decode(solution.variables[i]) for i in range(solution.problem.nvars)]

        for lever in model.levers:
            env[lever.name] = lever.from_variables(vars[offset : (offset + lever.length)])
            offset += lever.length

        if any([r.type not in [Response.MINIMIZE, Response.MAXIMIZE] for r in model.responses]):
            # if there are any responses not included in the optimization, we must
            # re-evaluate the model to get all responses
            env = evaluate(model, env)

        # here we copy over the objectives from the evaluated solution, which has been aggregated over all SOWs
        for i, response in enumerate([r for r in model.responses if r.type in [Response.MINIMIZE, Response.MAXIMIZE]]):
            env[response.name] = solution.objectives[i]

        result.append(env)

    return result
Example #10
def robust_optimize(model, SOWs, algorithm="NSGAII", NFE=10000, obj_aggregate=None, constr_aggregate=None, **kwargs):
    module = __import__("platypus", fromlist=[''])
    class_ref = getattr(module, algorithm)
    
    if obj_aggregate is None:
        from .robustness import mean
        obj_aggregate = mean
             
    if constr_aggregate is None:
        constr_aggregate = max
    
    args = kwargs.copy()
    args["problem"], levers = _to_robust_problem(model, SOWs, obj_aggregate, constr_aggregate)
    
    instance = class_ref(**args)
    instance.run(NFE)
    
    result = DataSet()
    
    for solution in unique(nondominated(instance.result)):
        if not solution.feasible:
            continue
        
        env = OrderedDict()
        offset = 0
        
        # decode from Platypus' internal representation (this should be fixed in Platypus instead)
        vars = [solution.problem.types[i].decode(solution.variables[i]) for i in range(solution.problem.nvars)]
        
        for lever, length in levers:
            env[lever.name] = lever.from_variables(vars[offset:(offset+length)])
            offset += length
        
        if any([r.dir not in [Response.MINIMIZE, Response.MAXIMIZE] for r in model.responses]):
            # if there are any responses not included in the optimization, we must
            # re-evaluate the model to get all responses
            env = evaluate(model, env)
            
        # here we copy over the objectives from the evaluated solution, which has been aggregated over all SOWs
        for i, response in enumerate([r for r in model.responses if r.dir in [Response.MINIMIZE, Response.MAXIMIZE]]):
            env[response.name] = solution.objectives[i]
            
        result.append(env)
        
    return result
Example #11
def platypus_alg(evaluator: AbstractEvaluator,
                 algorithm: Type[platypus.Algorithm], evaluations: int = conf['evaluations'],
                 *args, **kwargs) -> pd.DataFrame:
    """Uses a platypus algorithm to optimise over an evaluator

    :param evaluator: An evaluation function to optimise over.
    :param algorithm: The platypus algorithm to use.
    :param evaluations: The algorithm will be stopped once it uses more than this many evaluations.
    :param args: arguments to pass to `algorithm`
    :param kwargs: keyword arguments to pass to `algorithm`.
    :return: the non-dominated solutions found by the algorithm.
    """

    problem = evaluator.to_platypus()
    alg: platypus.AbstractGeneticAlgorithm = algorithm(problem, *args, **kwargs)
    alg.run(evaluations)
    # TODO: Have a smarter default for parts (instead of 'all')
    return solutions_to_df(platypus.unique(alg.result), evaluator.problem)
Example #12
    def run(self, algorithm: AbstractGeneticAlgorithm,
            nruns: int) -> List[NRPSolution]:
        algorithm.run(nruns)
        print(len(algorithm.result))
        # Only unique non-dominated solutions
        solutions: List[Solution] = unique(nondominated(algorithm.result))
        solutions = [sol for sol in solutions if sol.feasible]
        print(len(solutions))

        result: List[NRPSolution] = make_solutions(self.nrp_instance,
                                                   solutions)
        # Sort on the two objectives: first maximize score, then minimize cost
        result = sorted(result, key=lambda x: x.total_score, reverse=True)
        if self.is_last_single:
            result = sorted(result, key=lambda x: x.total_cost)
            # Taking only solution with minimal cost
            result = [result[0]]
        return result
Example #13
from platypus import GeneticAlgorithm, Problem, Constraint, Binary, nondominated, unique

# This simple example has an optimal value of 15 when picking items 1 and 4.
items = 7
capacity = 9
weights = [2, 3, 6, 7, 5, 9, 4]
profits = [6, 5, 8, 9, 6, 7, 3]
    
def knapsack(x):
    selection = x[0]
    total_weight = sum([weights[i] if selection[i] else 0 for i in range(items)])
    total_profit = sum([profits[i] if selection[i] else 0 for i in range(items)])
    
    return total_profit, total_weight

problem = Problem(1, 1, 1)
problem.types[0] = Binary(items)
problem.directions[0] = Problem.MAXIMIZE
problem.constraints[0] = Constraint("<=", capacity)
problem.function = knapsack

algorithm = GeneticAlgorithm(problem)
algorithm.run(10000)

for solution in unique(nondominated(algorithm.result)):
    print(solution.variables, solution.objectives)
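A short follow-up sketch for reading the result: the Binary-typed variable is a list of booleans, so the selected items and their total weight can be recovered directly (the names best and chosen are illustrative).

best = max(unique(nondominated(algorithm.result)), key=lambda s: s.objectives[0])
selection = best.variables[0]  # one boolean per item
chosen = [i for i, picked in enumerate(selection) if picked]
print("items:", chosen,
      "profit:", best.objectives[0],
      "weight:", sum(weights[i] for i in chosen))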
Example #14
        (4493, 7102), (3600, 6950), (3100, 7250), (4700, 8450), (5400, 8450),
        (5610, 10053), (4492, 10052), (3600, 10800), (3100, 10950), (4700, 11650),
        (5400, 11650), (6650, 10800), (7300, 10950), (7300, 7250), (6650, 6950),
        (7300, 3300), (6650, 2300), (5400, 1600), (8350, 2300), (7850, 3300),
        (9450, 5750), (10150, 5750), (10358, 7103), (9243, 7102), (8350, 6950),
        (7850, 7250), (9450, 8450), (10150, 8450), (10360, 10053), (9242, 10052),
        (8350, 10800), (7850, 10950), (9450, 11650), (10150, 11650), (11400, 10800),
        (12050, 10950), (12050, 7250), (11400, 6950), (12050, 3300), (11400, 2300),
        (10150, 1600), (13100, 2300), (12600, 3300), (14200, 5750), (14900, 5750),
        (15108, 7103), (13993, 7102), (13100, 6950), (12600, 7250), (14200, 8450),
        (14900, 8450), (15110, 10053), (13992, 10052), (13100, 10800), (12600, 10950),
        (14200, 11650), (14900, 11650), (16150, 10800), (16800, 10950), (16800, 7250),
        (16150, 6950), (16800, 3300), (16150, 2300), (14900, 1600), (19800, 800),
        (19800, 10000), (19800, 11900), (19800, 12200), (200, 12200), (200, 1100),
        (200, 800)]

def dist(x, y):
    return round(math.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2))
    
def tsp(x):
    tour = x[0]
    return sum([dist(cities[tour[i]], cities[tour[(i + 1) % len(cities)]]) for i in range(len(tour))])

problem = Problem(1, 1)
problem.types[0] = Permutation(range(len(cities)))
problem.directions[0] = Problem.MINIMIZE
problem.function = tsp
 
algorithm = GeneticAlgorithm(problem)
algorithm.run(100000, callback=lambda a: print(a.nfe, unique(nondominated(algorithm.result))[0].objectives[0]))
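A brief follow-up sketch, assuming the same platypus imports as the earlier examples: after the run, the Permutation-typed variable holds the visiting order directly.

best = unique(nondominated(algorithm.result))[0]
tour = best.variables[0]  # a permutation of city indices
print("tour length:", best.objectives[0])
print("visit order:", tour)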
Example #15
def optimize(problem, algorithm, iterations=100, write_forcefields=None):
    """
    Uniform wrapper function that steps through the optimization process. Also provides uniform handling of output files.

    :param problem: EZFF Problem to be optimized
    :type problem: Problem

    :param algorithm: EZFF Algorithm to use for optimization. Allowed options are ``NSGAII``, ``NSGAIII`` and ``IBEA``
    :type algorithm: str

    :param iterations: Number of epochs to perform the optimization for
    :type iterations: int

    :param write_forcefields: All non-dominated forcefields are written out every ``write_forcefields`` epochs. If this is ``None``, the forcefields are written out for the first and last epoch
    :type write_forcefields: int or None

    """

    # Convert algorithm and iterations into lists
    if not isinstance(algorithm, list):
        algorithm = [algorithm]
    if not isinstance(iterations, list):
        iterations = [iterations]

    if not len(algorithm) == len(iterations):
        raise ValueError(
            "Please provide a maximum number of epochs for each algorithm")

    total_epochs = 0
    current_solutions = None
    for stage in range(0, len(algorithm)):

        # Construct an algorithm
        algorithm_for_this_stage = generate_algorithm(
            algorithm[stage]["myproblem"],
            algorithm[stage]["algorithm_string"],
            algorithm[stage]["population"], current_solutions,
            algorithm[stage]["pool"])

        if not isinstance(write_forcefields, int):
            write_forcefields = iterations[stage]

        for i in range(0, iterations[stage]):
            total_epochs += 1
            print('Epoch: ' + str(total_epochs))
            algorithm_for_this_stage.step()

            # Make output files/directories
            outdir = 'results/' + str(total_epochs)
            if not os.path.isdir(outdir):
                os.makedirs(outdir)

            varfilename = outdir + '/variables'
            objfilename = outdir + '/errors'
            varfile = open(varfilename, 'w')
            objfile = open(objfilename, 'w')
            for solution in unique(
                    nondominated(algorithm_for_this_stage.result)):
                varfile.write(' '.join(
                    [str(variables) for variables in solution.variables]))
                varfile.write('\n')
                objfile.write(' '.join(
                    [str(objective) for objective in solution.objectives]))
                objfile.write('\n')
            varfile.close()
            objfile.close()

            if total_epochs % (write_forcefields - 1) == 0:
                if not os.path.isdir(outdir + '/forcefields'):
                    os.makedirs(outdir + '/forcefields')
                for sol_index, solution in enumerate(
                        unique(nondominated(algorithm_for_this_stage.result))):
                    ff_name = outdir + '/forcefields/FF_' + str(sol_index)
                    parameters_dict = dict(
                        zip(problem.variables, solution.variables))
                    write_forcefield_file(ff_name,
                                          problem.template,
                                          parameters_dict,
                                          verbose=False)

            current_solutions = algorithm_for_this_stage.population
Example #16
def optimize(model,
             scenario,
             nfe,
             epsilons,
             sc_name,
             algorithm=EpsNSGAII,
             searchover='levers'):
    '''optimize the model
    
    Parameters
    ----------
    model : a Model instance
    algorithm : a valid Platypus optimization algorithm
    nfe : int
    searchover : {'uncertainties', 'levers'}
    
    Returns
    -------
    pandas DataFrame
    
    
    Raises
    ------
    EMAError if searchover is not one of 'uncertainties' or 'levers'
    
    TODO:: constraints are not yet supported
    
    '''
    if searchover not in ('levers', 'uncertainties'):
        raise EMAError(("searchover should be one of 'levers' or "
                        "'uncertainties', not {}".format(searchover)))

    # extract the levers and the outcomes
    decision_variables = [dv for dv in getattr(model, searchover)]
    outcomes = [
        outcome for outcome in model.outcomes
        if outcome.kind != AbstractOutcome.INFO
    ]

    evalfunc = functools.partial(evaluate_function,
                                 model=model,
                                 scenario=scenario,
                                 decision_vars=decision_variables,
                                 searchover=searchover)

    # setup the optimization problem
    # TODO:: add constraints
    problem = Problem(len(decision_variables), len(outcomes))
    problem.types[:] = [
        Real(dv.lower_bound, dv.upper_bound) for dv in decision_variables
    ]
    problem.function = evalfunc
    problem.directions = [outcome.kind for outcome in outcomes]

    # solve the optimization problem
    optimizer = algorithm(problem, epsilons=epsilons)
    optimizer.run(nfe)

    # extract the names for levers and the outcomes
    lever_names = [dv.name for dv in decision_variables]
    outcome_names = [outcome.name for outcome in outcomes]

    solutions = []
    for solution in unique(nondominated(optimizer.result)):
        decision_vars = dict(zip(lever_names, solution.variables))
        decision_out = dict(zip(outcome_names, solution.objectives))
        result = {**decision_vars, **decision_out}
        solutions.append(result)

    #print("fe_result: ", optimizer.algorithm.fe_results)
    #plot_convergence(optimizer.algorithm.hv_results, sc_name)
    results = pd.DataFrame(solutions, columns=lever_names + outcome_names)

    # save the hypervolume output in a csv file
    # hv is a 2-D array: hv[0] is the record of nfe values, hv[1] the record of hypervolumes
    hv = np.swapaxes(np.array(optimizer.algorithm.hv_results), 0, 1)
    df = pd.DataFrame(hv).transpose()
    #df.to_csv("Hypervolume_scenario_{}_v6.csv".format(sc_name))

    return results, df
Example #17
def optimize(problem, algorithm, iterations=100, write_forcefields=None):
    """
    The optimize function provides a uniform wrapper to solve the EZFF problem using the algorithm(s) provided.

    :param problem: EZFF Problem to be optimized
    :type problem: Problem

    :param algorithm: EZFF Algorithm(s) to use for optimization. Allowed options are ``NSGAII``, ``NSGAIII`` and ``IBEA``, or a list containing any sequence of these options. The algorithms will be used in the sequence provided
    :type algorithm: str or list (of strings)

    :param iterations: Number of epochs to perform the optimization for. If multiple algorithms are specified, one iteration value should be provided for each algorithm
    :type iterations: int or list (of ints)

    :param write_forcefields: All non-dominated forcefields are written out every ``write_forcefields`` epochs. If this is ``None``, the forcefields are written out for the first and last epoch
    :type write_forcefields: int or None

    """
    # Convert algorithm and iterations into lists
    if not isinstance(algorithm, list):
        algorithm = [algorithm]
    if not isinstance(iterations, list):
        iterations = [iterations]

    if not len(algorithm) == len(iterations):
        raise ValueError(
            "Please provide a maximum number of epochs for each algorithm")

    total_epochs = 0
    current_solutions = None
    for stage in range(0, len(algorithm)):

        # Construct an algorithm
        algorithm_for_this_stage = _generate_algorithm(
            algorithm[stage]["myproblem"],
            algorithm[stage]["algorithm_string"],
            algorithm[stage]["population"],
            algorithm[stage]["mutation_probability"], current_solutions,
            algorithm[stage]["pool"])

        if not isinstance(write_forcefields, int):
            write_forcefields = np.sum(
                [iterations[stage_no] for stage_no in range(stage + 1)])

        for i in range(0, iterations[stage]):
            total_epochs += 1
            print('Epoch: ' + str(total_epochs))
            algorithm_for_this_stage.step()

            # Make output files/directories
            outdir = 'results/' + str(total_epochs)
            if not os.path.isdir(outdir):
                os.makedirs(outdir)

            varfilename = outdir + '/variables'
            objfilename = outdir + '/errors'
            varfile = open(varfilename, 'w')
            objfile = open(objfilename, 'w')
            for solution in unique(
                    nondominated(algorithm_for_this_stage.result)):
                varfile.write(' '.join(
                    [str(variables) for variables in solution.variables]))
                varfile.write('\n')
                objfile.write(' '.join(
                    [str(objective) for objective in solution.objectives]))
                objfile.write('\n')
            varfile.close()
            objfile.close()

            if total_epochs % write_forcefields == 0:
                if not os.path.isdir(outdir + '/forcefields'):
                    os.makedirs(outdir + '/forcefields')
                for sol_index, solution in enumerate(
                        unique(nondominated(algorithm_for_this_stage.result))):
                    ff_name = outdir + '/forcefields/FF_' + str(sol_index + 1)
                    parameters_dict = dict(
                        zip(problem.variables, solution.variables))
                    generate_forcefield(problem.template,
                                        parameters_dict,
                                        outfile=ff_name)

            current_solutions = algorithm_for_this_stage.population
Example #18
    def __call__(self, optimizer):
        n_solutions = 0
        for _ in platypus.unique(platypus.nondominated(optimizer.result)):
            n_solutions += 1
        self.results.append(n_solutions)
        super().__call__(optimizer)
Example #19
from platypus import GeneticAlgorithm, Problem, Constraint, Binary, nondominated, unique

# This simple example has an optimal value of 15 when picking items 1 and 4.
items = 7
capacity = 9
weights = [2, 3, 6, 7, 5, 9, 4]
profits = [6, 5, 8, 9, 6, 7, 3]


def knapsack(x):
    selection = x[0]
    total_weight = sum(
        [weights[i] if selection[i] else 0 for i in range(items)])
    total_profit = sum(
        [profits[i] if selection[i] else 0 for i in range(items)])

    return total_profit, total_weight


problem = Problem(1, 1, 1)
problem.types[0] = Binary(items)
problem.directions[0] = Problem.MAXIMIZE
problem.constraints[0] = Constraint("<=", capacity)
problem.function = knapsack

algorithm = GeneticAlgorithm(problem)
algorithm.run(10000)

for solution in unique(nondominated(algorithm.result)):
    print(solution.variables, solution.objectives)