def test_should_hypervolume_return_the_correct_value_when_applied_to_the_ZDT1_reference_front(self):
        problem = ZDT1()
        problem.reference_front = read_solutions(filename='resources/reference_front/ZDT1.pf')

        reference_point = [1, 1]

        hv = HyperVolume(reference_point)
        value = hv.compute(problem.reference_front)

        self.assertAlmostEqual(0.666, value, delta=0.001)
from examples.multiobjective.parallel.zdt1_modified import ZDT1Modified
from jmetal.algorithm.multiobjective.omopso import OMOPSO
from jmetal.operator import UniformMutation
from jmetal.operator.mutation import NonUniformMutation
from jmetal.util.archive import CrowdingDistanceArchive
from jmetal.util.solution_list import print_function_values_to_file, print_variables_to_file
from jmetal.util.solution_list import read_solutions, \
    SparkEvaluator
from jmetal.util.termination_criterion import StoppingByEvaluations

if __name__ == '__main__':
    problem = ZDT1Modified()
    problem.reference_front = read_solutions(
        filename='../../resources/reference_front/{}.pf'.format(
            problem.get_name()))
    mutation_probability = 1.0 / problem.number_of_variables

    max_evaluations = 100
    swarm_size = 10
    algorithm = OMOPSO(
        problem=problem,
        swarm_size=swarm_size,
        epsilon=0.0075,
        uniform_mutation=UniformMutation(probability=mutation_probability,
                                         perturbation=0.5),
        non_uniform_mutation=NonUniformMutation(
            mutation_probability,
            perturbation=0.5,
            max_iterations=int(max_evaluations / swarm_size)),
        leaders=CrowdingDistanceArchive(10),
        termination_criterion=StoppingByEvaluations(max=max_evaluations),
        swarm_evaluator=SparkEvaluator())

    algorithm.run()
    front = algorithm.get_result()

    # Save results to file
    print_function_values_to_file(front, 'FUN.' + algorithm.get_name() + '.' + problem.get_name())
    print_variables_to_file(front, 'VAR.' + algorithm.get_name() + '.' + problem.get_name())
Example #3
from jmetal.algorithm.multiobjective.nsgaii import NSGAII
from jmetal.operator import SBXCrossover, PolynomialMutation, BinaryTournamentSelection
from jmetal.problem import ZDT1, ZDT4
from jmetal.util.comparator import RankingAndCrowdingDistanceComparator, DominanceComparator
from jmetal.util.observer import ProgressBarObserver, VisualizerObserver
from jmetal.util.solution_list import read_solutions, print_function_values_to_file, print_variables_to_file
from jmetal.util.termination_criterion import StoppingByEvaluations
from jmetal.util.visualization import Plot, InteractivePlot

if __name__ == '__main__':
    problem = ZDT4()
    problem.reference_front = read_solutions(
        filename='../../resources/reference_front/ZDT4.pf')

    max_evaluations = 25000
    algorithm = NSGAII(
        problem=problem,
        population_size=100,
        offspring_population_size=100,
        mutation=PolynomialMutation(probability=1.0 /
                                    problem.number_of_variables,
                                    distribution_index=20),
        crossover=SBXCrossover(probability=1.0, distribution_index=20),
        selection=BinaryTournamentSelection(
            comparator=RankingAndCrowdingDistanceComparator()),
        termination_criterion=StoppingByEvaluations(max=max_evaluations),
        dominance_comparator=DominanceComparator())

    algorithm.observable.register(observer=ProgressBarObserver(
        max=max_evaluations))
    algorithm.observable.register(observer=VisualizerObserver(
        reference_front=problem.reference_front))

    algorithm.run()
    front = algorithm.get_result()

    # Save results to file
    print_function_values_to_file(front, 'FUN.' + algorithm.get_name() + '.' + problem.get_name())
    print_variables_to_file(front, 'VAR.' + algorithm.get_name() + '.' + problem.get_name())
Example #4
import logging
import os
from pathlib import Path
from typing import List

from jmetal.core.quality_indicator import QualityIndicator
from jmetal.util.solution_list import read_solutions

LOGGER = logging.getLogger('jmetal')


def generate_summary_from_experiment(
        input_dir: str,
        quality_indicators: List[QualityIndicator],
        reference_fronts: str = ''):
    """ Compute a list of quality indicators. The input data directory *must* met the following structure (this is generated
    automatically by the Experiment class):

    * <base_dir>

      * algorithm_a

        * problem_a

          * FUN.0.tsv
          * FUN.1.tsv
          * VAR.0.tsv
          * VAR.1.tsv
          * ...

    :param input_dir: Directory where all the input data is found (function values and variables).
    :param reference_fronts: Directory where reference fronts are found.
    :param quality_indicators: List of quality indicators to compute.
    :return: None.
    """

    if not quality_indicators:
        quality_indicators = []

    with open('QualityIndicatorSummary.csv', 'w+') as of:
        of.write(
            'Algorithm,Problem,ExecutionId,IndicatorName,IndicatorValue\n')

    for dirname, _, filenames in os.walk(input_dir):
        for filename in filenames:
            try:
                # Linux filesystem
                algorithm, problem = dirname.split('/')[-2:]
            except ValueError:
                # Windows filesystem
                algorithm, problem = dirname.split('\\')[-2:]

            if 'TIME' in filename:
                run_tag = [s for s in filename.split('.') if s.isdigit()].pop()

                with open(os.path.join(dirname, filename),
                          'r') as content_file:
                    content = content_file.read()

                with open('QualityIndicatorSummary.csv', 'a+') as of:
                    of.write(','.join(
                        [algorithm, problem, run_tag, 'Time',
                         str(content)]))
                    of.write('\n')

            if 'FUN' in filename:
                solutions = read_solutions(os.path.join(dirname, filename))
                run_tag = [s for s in filename.split('.') if s.isdigit()].pop()

                for indicator in quality_indicators:
                    reference_front_file = os.path.join(
                        reference_fronts, problem + '.pf')

                    # Add reference front if any
                    if hasattr(indicator, 'reference_front'):
                        if Path(reference_front_file).is_file():
                            indicator.reference_front = read_solutions(
                                reference_front_file)
                        else:
                            LOGGER.warning('Reference front not found at %s',
                                           reference_front_file)

                    result = indicator.compute(solutions)

                    # Save quality indicator value to file
                    with open('QualityIndicatorSummary.csv', 'a+') as of:
                        of.write(','.join([
                            algorithm, problem, run_tag,
                            indicator.get_name(),
                            str(result)
                        ]))
                        of.write('\n')
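
A minimal usage sketch for generate_summary_from_experiment, assuming the imports above; the directory names, the reference-front path, and the indicator list are illustrative placeholders (GenerationalDistance and HyperVolume are assumed to be importable from jmetal.core.quality_indicator, as in recent jMetalPy releases), not values taken from the snippets.

from jmetal.core.quality_indicator import GenerationalDistance, HyperVolume

if __name__ == '__main__':
    # Hypothetical layout: 'data' is the <base_dir> written by the Experiment class,
    # 'resources/reference_front' holds one <problem>.pf file per problem.
    generate_summary_from_experiment(
        input_dir='data',
        reference_fronts='resources/reference_front',
        quality_indicators=[
            GenerationalDistance(reference_front=None),  # the front is loaded per problem by the function
            HyperVolume(reference_point=[1.0, 1.0])
        ])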