Example #1
 def load_fpga(self, config_data):
     """Loads a 2d array of configuration data onto to the FPGA"""
     logger.start_timer()
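     # NOTE: the actual FPGA loading steps below are currently commented out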
     #        self.load_cram(config_data)
     #        self.write_config_file()
     #        flash_config_file(self.base_file_name)
     logger.stop_timer('INTERFACE.PY load_fpga')
Example #2
 def evaluate(self, data, datatype=int):
     """Evaluates given data on the FPGA."""
     logger.start_timer()
     results = []
     for datum in data:
         pred = None
         attempts = 0
         while pred is None:
             attempts += 1
             if attempts > 10:
                 raise ValueError('Tried 10 times to evaluate_arduino')
             try:
                 pred = evaluate_arduino(datum,
                                         send_type=datatype,
                                         return_type=float)
             except (UnicodeDecodeError, ValueError):
                 pass
         results.append(pred)
     if logger._print_time:
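         # Debug output: print scaled inputs, predictions, and squared errors side by side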
         data = np.array(data).astype(float)
         d = np.array(data / (2**6))
         r = np.array(results)
         mse = (d - r)**2
         arr = np.column_stack((d, r, mse))
         np.set_printoptions(suppress=True)
         print(arr)
     logger.stop_timer('INTERFACE.PY Evaluation complete')
     return results
Example #3
    def compute_fitness(self, pop):
        """Calculates the fitness scores for the entire Population

        Args:
            pop (list): An iterable of Individuals (np.ndarray subclasses) that make up the population

        Returns:
            The number of individuals whose previously invalid fitness scores were updated
        """

        logger.start_timer()

        # Evaluate individuals with an invalid fitness, or every individual if we
        # are at the start of the evolutionary algorithm (curr_gen == 0). These are
        # the individuals that have not been evaluated yet, or offspring whose
        # fitness was deleted by mutation / crossover.
        invalid_inds = [
            ind for ind in pop if not ind.fitness.valid or self.curr_gen == 0
        ]

        # Get fitness score for each individual with
        # invalid fitness score in population
        for ind in invalid_inds:

            # Load Weights into model using individual
            self.model.load_parameters(ind)

            # Calculate the Fitness score of the individual
            ind.fitness.fitness_score = self.fitness_score()

        logger.stop_timer('SGA.PY Computing fitness')

        return len(invalid_inds)
Example #4
    def load_es_vars(self):
        """Loads the evolutionary strategy variables from checkpoint given after
        creating the fitness and individual templates for DEAP evolution or initializes them
        """

        logger.start_timer()

        if self.ckpt:
            # A checkpoint file was given; load the saved state from the pickle file
            with open(self.ckpt, "rb") as cp_file:
                cp = pickle.load(cp_file)

            self.rndstate = cp["rndstate"]
            random.seed(self.rndstate)  # random.seed() returns None, so store the seed itself
            self.pop = cp["pop"]
            self.curr_gen = int(cp["curr_gen"])
            self.halloffame = cp["halloffame"]
            self.logbook = cp["logbook"]

        else:
            # Start a new evolution
            self.rndstate = 100  # Seed for reproducibility
            random.seed(self.rndstate)
            self.pop = self.toolbox.population(n=self.popsize)
            self.curr_gen = 0
            self.halloffame = tools.HallOfFame(
                maxsize=int(self.halloffamesize * self.popsize),
                similar=np.array_equal)
            self.logbook = tools.Logbook()

        self.paretofront = None
        logger.stop_timer('SGA.PY Loading ES Vars')
Example #5
 def evaluate(self, data):
     """Evaluates given data on the FPGA."""
     logger.start_timer()
     results = []
     for datum in data:
         pred = None
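         # Retry until evaluate_arduino returns a value that parses cleanly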
         while pred is None:
             try:
                 pred = evaluate_arduino(datum)
             except (UnicodeDecodeError, ValueError):
                 pass
         results.append(pred)
     logger.stop_timer('INTERFACE.PY Evaluation complete')
     return results
Example #6
    def save_ckpt(self):
        """Saves information necessary to resume algorithm after stopping"""

        logger.start_timer()

        # Fill the dictionary using the dict(key=value[, ...]) constructor
        cp = dict(pop=self.pop,
                  strategy=self.name,
                  curr_gen=self.curr_gen,
                  halloffame=self.halloffame,
                  paretofront=self.paretofront,
                  logbook=self.logbook,
                  rndstate=self.rndstate)

        with open(os.path.join(self.ckpt_dir, '{0:09d}.pkl'.format(self.curr_gen)), "wb") as cp_file:
            pickle.dump(cp, cp_file)

        logger.stop_timer('SGA.PY Saving checkpoint')
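
The zero-padded file name above makes checkpoints sort lexicographically by generation, so the most recent one can be found with a plain sort. Below is a minimal sketch of reloading the latest checkpoint, assuming the same pickle layout as save_ckpt; the helper name load_latest_ckpt is illustrative and not part of the project.

import os
import pickle

def load_latest_ckpt(ckpt_dir):
    """Returns the checkpoint dict for the highest generation saved in ckpt_dir, or None."""
    pkl_files = sorted(f for f in os.listdir(ckpt_dir) if f.endswith('.pkl'))
    if not pkl_files:
        return None
    with open(os.path.join(ckpt_dir, pkl_files[-1]), "rb") as cp_file:
        return pickle.load(cp_file)  # dict with 'pop', 'curr_gen', 'halloffame', 'logbook', ...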
Example #7
    def evaluate(self, pop):
        """Evaluates an entire population on a dataset on the neural net / fpga
        architecture specified by the model, calculates the fitness score for
        each individual, and sorts the entire population by fitness score in-place

        Args:
            pop (list): An iterable of np.ndarrays that represent the individuals

        Returns:
            Average fitness score of the population

        """
        logger.start_timer()

        # Re-generates the training set for the problem (if possible) to prevent overfitting
        self.problem.reset_train_set()

        logger.stop_timer('SGA.PY Regenerate the training set')
        logger.start_timer()

        # Compute fitness for the whole population
        num_invalid_inds = self.compute_fitness(pop)

        logger.stop_timer('SGA.PY Computing all fitness for population')
        logger.start_timer()

        # The population is entirely replaced by the
        # evaluated offspring
        self.pop[:] = pop
        logger.stop_timer('SGA.PY Replace population with evaluated offspring')
        logger.start_timer()

        # Update population statistics
        self.halloffame.update(self.pop)
        # record = self.stats.compile(self.pop)
        # self.logbook.record(gen=self.curr_gen, evals=num_invalid_inds, **record)
        logger.stop_timer('SGA.PY Updating population statistics')

        return np.mean([ind.fitness.fitness_score for ind in pop])
Example #8
    def init_fitness_and_inds():
        """Initializes the fitness and the definition of individuals"""
        logger.start_timer()

        class Fitness(base.Fitness):
            def __init__(self):
                super().__init__()
                self.__fitness_score = None

            @property
            def fitness_score(self):
                return self.values[0]

            @fitness_score.setter
            def fitness_score(self, fitness_score):
                self.__fitness_score = fitness_score
                if fitness_score:
                    # WARNING:
                    # Setting values breaks a lot of things:
                    # self.__fitness_score is reset to None
                    # after setting values, so you should only
                    # set values after all the scores you require are set
                    self.values = (fitness_score,)

            @fitness_score.deleter
            def fitness_score(self):
                # Note: name mangling stores the attribute as '_Fitness__fitness_score'
                if hasattr(self, '_Fitness__fitness_score'):
                    del self.__fitness_score

            def delValues(self):
                super().delValues()
                if hasattr(self, '_Fitness__fitness_score'):
                    del self.__fitness_score

        creator.create("FitnessMin", Fitness, weights=(-1.0,)) # Just Fitness
        creator.create("Individual", np.ndarray, fitness=creator.FitnessMin)

        logger.stop_timer('SGA.PY Initializing fitness and individuals')
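
Below is a minimal usage sketch of the classes registered above. It assumes init_fitness_and_inds() has already been called so that creator.FitnessMin and creator.Individual exist, and it illustrates why the WARNING matters: assigning fitness_score is what populates values, which makes fitness.valid true and lets compute_fitness skip the individual on later passes.

import numpy as np
from deap import creator

# init_fitness_and_inds() must have run before this point
ind = creator.Individual(np.zeros(8))   # an individual wrapping a parameter vector
print(ind.fitness.valid)                # False: no values have been set yet
ind.fitness.fitness_score = 0.42        # the setter also assigns self.values = (0.42,)
print(ind.fitness.valid)                # True: compute_fitness would now skip this individual
print(ind.fitness.fitness_score)        # 0.42, read back from values[0]
del ind.fitness.values                  # invalidate again, e.g. after mutation or crossover
print(ind.fitness.valid)                # False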
Example #9
def predict(model_type, problem_type, strategy, input_data, ckpt, save_dir):
    """Predicts the output from loading the model saved in checkpoint
    and saves y_pred into same path as input_data but with a _y_pred in the name

    Args:
        model_type (str): A string specifying whether we're optimizing on a neural network
            or field programmable gate array
        problem_type (str): A string specifying what type of problem we're trying to optimize
        strategy (str): A string specifying what type of optimization algorithm to use
        input_data (str): Path to the .npy that stores the np.ndarray to use as Input data for model
        ckpt (str): Location of checkpoint to load the population
        save_dir (str): Location of where to store the predictions

    """

    # 1. Choose Problem and get the specific evaluation function
    # for that problem

    logger.start_timer()
    logger.log("Loading problem...")
    if problem_type == 'mnist':
        problem = ProblemMNIST()
    else:
        problem = ProblemFuncApprox(func=problem_type)

    logger.stop_timer('PREDICT.PY Choosing evaluation function for problem')
    logger.start_timer()

    # 2. Choose Target Platform
    logger.log("Loading target platform...")
    if model_type == 'nn':
        from varro.algo.models import ModelNN as Model  # Import here so we don't load tensorflow if not needed
    elif model_type == 'fpga':
        from varro.algo.models import ModelFPGA as Model
    model = Model(problem)

    logger.stop_timer('PREDICT.PY Choosing target platform')
    logger.start_timer()

    if ckpt.endswith(".bit"):
        logger.log("Loading data from bit file...")
        from varro.fpga.config import bit_to_cram

        logger.start_timer()
        parameters = bit_to_cram(ckpt)

        logger.stop_timer('PREDICT.PY Loading data from bit file')

    elif ckpt.endswith(".pkl"):
        logger.log("Loading data from pickle file...")

        logger.start_timer()
        with open(ckpt, "rb") as cp_file:
            if strategy == 'sga':
                StrategySGA.init_fitness_and_inds()
            elif strategy == 'moga':
                StrategyMOGA.init_fitness_and_inds()
            elif strategy == 'ns-es':
                StrategyNSES.init_fitness_and_inds()
            elif strategy == 'nsr-es':
                StrategyNSRES.init_fitness_and_inds()
            elif strategy == 'cma-es':
                raise NotImplementedError
            else:
                raise NotImplementedError

            # Use the fittest individual from the checkpoint's hall of fame
            cp = pickle.load(cp_file)
            parameters = cp["halloffame"][0]
            logger.stop_timer('PREDICT.PY Loading data from pickle file')

    else:
        raise ValueError("Checkpoint file has unrecognised extension.")

    logger.start_timer()
    # Load Weights into model using individual
    model.load_parameters(parameters)

    logger.stop_timer('PREDICT.PY Loading weights into model')
    logger.start_timer()

    # Predict labels using np array in input_data
    logger.log("Running model.predict")
    y_pred = np.array(model.predict(np.load(input_data)))
    logger.log(str(y_pred))
    logger.stop_timer('PREDICT.PY Predicting labels using np array')

    # Save the y_pred into a file
    y_pred_path = join(
        save_dir,
        ckpt.split('/')[-1][:-4] + '_' + input_data[:-4].split('/')[-1] +
        '_y_pred.npy')
    np.save(y_pred_path, y_pred)
Example #10
def fit(model_type,
        problem_type,
        strategy,
        cxpb=None,
        mutpb=None,
        imutpb=None,
        imutmu=None,
        imutsigma=None,
        sample_size=500,
        popsize=None,
        elitesize=None,
        ngen=None,
        ckpt=None,
        ckpt_freq=10,
        novelty_metric=None,
        halloffamesize=None,
        earlystop=False,
        grid_search=False,
        ckpt_dir=None):
    """Control center to call other modules to execute the optimization

    Args:
        model_type (str): A string specifying whether we're optimizing on a neural network
            or field programmable gate array
        problem_type (str): A string specifying what type of problem we're trying to optimize
        strategy (str): A string specifying what type of optimization algorithm to use
        cxpb (float): Cross-over probability for evolutionary algorithm
        mutpb (float): Mutation probability for evolutionary algorithm
        imutpb (float): Mutation probability for each individual's attribute
        imutmu (float): Mean parameter for the Gaussian Distribution we're mutating an attribute from
        imutsigma (float): Sigma parameter for the Gaussian Distribution we're mutating an attribute from
        sample_size (int): Number of samples to use for the function approximation problem
        popsize (int): Number of individuals to keep in each Population
        elitesize (float): Percentage of fittest individuals to pass on to next generation
        ngen (int): Number of generations to run an evolutionary algorithm
        ckpt (str): Location of checkpoint to load the population
        ckpt_freq (int): Number of generations between saved checkpoints
        novelty_metric (str): The distance metric to be used to measure an Individual's novelty
        halloffamesize (float): Percentage of individuals in population we store in the HallOfFame / Archive
        earlystop (bool): Whether to stop evolution early once the fitness criteria are met
        grid_search (bool): Whether grid search will be in effect
        ckpt_dir (str): Directory to save checkpoints in

    Returns:
        fittest_ind_score: Scalar of the best individual in the population's fitness score

    """
    # 1. Choose Problem and get the specific evaluation function for that problem

    logger.start_timer()
    logger.log("Loading problem...")
    if problem_type == 'mnist':
        problem = ProblemMNIST()
    else:
        problem = ProblemFuncApprox(problem_type, sample_size)

    logger.stop_timer(
        'FIT.PY Choosing problem and getting specific evaluation function')
    logger.start_timer()

    # 2. Choose Target Platform
    logger.log("Loading target platform...")
    if model_type == 'nn':
        from varro.algo.models import ModelNN as Model  # Import here so we don't load tensorflow if not needed
    elif model_type == 'fpga':
        from varro.algo.models import ModelFPGA as Model
    model = Model(problem)

    logger.stop_timer('FIT.PY Loading target platform')
    logger.start_timer()

    strategy_args = dict(novelty_metric=novelty_metric,
                         model=model,
                         problem=problem,
                         cxpb=cxpb,
                         mutpb=mutpb,
                         popsize=popsize,
                         elitesize=elitesize,
                         ngen=ngen,
                         imutpb=imutpb,
                         imutmu=imutmu,
                         imutsigma=imutsigma,
                         ckpt=ckpt,
                         halloffamesize=halloffamesize,
                         earlystop=earlystop,
                         ckpt_dir=ckpt_dir)

    # 3. Set Strategy
    logger.log("Loading strategy...")
    if strategy == 'sga':
        strategy = StrategySGA(**strategy_args)
    elif strategy == 'moga':
        strategy = StrategyMOGA(**strategy_args)
    elif strategy == 'ns-es':
        strategy = StrategyNSES(**strategy_args)
    elif strategy == 'nsr-es':
        strategy = StrategyNSRES(**strategy_args)
    elif strategy == 'cma-es':
        strategy = StrategyCMAES(**strategy_args)
    else:
        raise NotImplementedError

    logger.start_timer()
    # 4. Evolve
    pop, avg_fitness_scores, fittest_ind_score = evolve(
        strategy=strategy, grid_search=grid_search, ckpt_freq=ckpt_freq)

    logger.stop_timer('FIT.PY Evolving')

    return fittest_ind_score
Example #11
def main():
    # Create Logs folder if not created
    make_path(ABS_ALGO_EXP_LOGS_PATH)
    make_path(ABS_ALGO_HYPERPARAMS_PATH)
    make_path(ABS_ALGO_PREDICTIONS_PATH)

    # Get the Arguments parsed from file execution
    args = get_args()

    experiment_name = args.model_type + '_' + args.problem_type + '_' + \
        datetime.now().strftime(DATE_NAME_FORMAT)

    # Init Loggers
    log_path = join(ABS_ALGO_EXP_LOGS_PATH, experiment_name + '.log')

    logger.add_output(StdOutput())
    logger.add_output(TextOutput(log_path))
    logger.log("Running Project Varro")
    logger.log("Purpose: " + args.purpose)

    if args.verbose:
        logger.set_timer(True)

    if args.hyper_opt is not None:
        if args.hyper_opt == 'grid_search':
            from varro.algo.hyperparam_opt.grid_search import grid_search
            checkpoint_dir = join(GRID_SEARCH_CHECKPOINTS_PATH, 'tmp')
            make_path(checkpoint_dir)
            grid_search()
        elif args.hyper_opt == 'bayesian_opt':
            raise NotImplementedError
        else:
            raise ValueError("Unknown hyperparameter optimization method.")
        return
    else:
        checkpoint_dir = join(EXPERIMENT_CHECKPOINTS_PATH, experiment_name)
        make_path(checkpoint_dir)

    # Check if we're fitting or predicting
    if args.purpose == 'fit':
        # Start Optimization

        logger.start_timer()
        fit(model_type=args.model_type,
            problem_type=args.problem_type,
            strategy=args.strategy,
            cxpb=args.cxpb,
            mutpb=args.mutpb,
            imutpb=args.imutpb,
            imutmu=args.imutmu,
            imutsigma=args.imutsigma,
            sample_size=args.samplesize,
            popsize=args.popsize,
            elitesize=args.elitesize,
            ngen=args.ngen,
            ckpt=args.ckpt,
            ckpt_freq=args.ckpt_freq,
            novelty_metric=args.novelty_metric,
            halloffamesize=args.halloffamesize,
            earlystop=args.earlystop,
            ckpt_dir=checkpoint_dir)
        logger.stop_timer('EXPERIMENT.PY Fitting complete')

    else:
        if args.ckptfolder:
            # Make predictions using the best individual from each generation in ckptfolder

            logger.start_timer()
            save_dir = join(ABS_ALGO_PREDICTIONS_PATH,
                            args.ckptfolder.split('/')[-1])
            make_path(save_dir)
            ckpt_files = [
                join(args.ckptfolder, f) for f in listdir(args.ckptfolder)
                if isfile(join(args.ckptfolder, f))
            ]
            for ckpt in ckpt_files:
                predict(model_type=args.model_type,
                        problem_type=args.problem_type,
                        strategy=args.strategy,
                        input_data=args.input_data,
                        ckpt=ckpt,
                        save_dir=save_dir)

            logger.stop_timer(
                'EXPERIMENT.PY Making predictions using the best individual from each generation'
            )

        else:
            # Make a single prediction

            logger.start_timer()
            save_dir = join(ABS_ALGO_PREDICTIONS_PATH,
                            args.ckpt.split('/')[-2])
            make_path(save_dir)
            predict(model_type=args.model_type,
                    problem_type=args.problem_type,
                    strategy=args.strategy,
                    input_data=args.input_data,
                    ckpt=args.ckpt,
                    save_dir=save_dir)

            logger.stop_timer('EXPERIMENT.PY Making a single prediction')
Example #12
def es_toolbox(strategy_name,
               i_shape,
               evaluate,
               model_type,
               imutpb=None,
               imutmu=None,
               imutsigma=None):
    """Initializes and configures the DEAP toolbox for evolving the parameters of a model.

    Args:
        strategy_name (str): The strategy that is being used for evolution
        i_shape (int or tuple): Size or shape of an individual in the population
        evaluate (function): Function to evaluate an entire population
        model_type (str): A string specifying whether we're optimizing on a neural network
            or field programmable gate array
        imutpb (float): Mutation probability for each individual's attribute
        imutmu (float): Mean parameter for the Gaussian Distribution we're mutating an attribute from
        imutsigma (float): Sigma parameter for the Gaussian Distribution we're mutating an attribute from

    Returns:
        toolbox (deap.base.Toolbox): Configured DEAP Toolbox for the algorithm.

    """
    logger.log("Initializing toolbox...")
    # Set seed
    seed = int(time.time())
    random.seed(seed)

    logger.log('TOOLBOX.PY random seed is {}'.format(seed))

    logger.start_timer()

    # Initialize Toolbox
    toolbox = base.Toolbox()

    logger.stop_timer('TOOLBOX.PY Initializing toolbox')
    logger.start_timer()
    # Defining tools specific to model
    if model_type == "nn":

        # ATTRIBUTE
        toolbox.register("attribute", random.random)

        logger.stop_timer('TOOLBOX.PY register("attribute")')
        logger.start_timer()

        # INDIVIDUAL
        toolbox.register("individual",
                         getattr(tools, 'initRepeat'),
                         getattr(creator, 'Individual'),
                         getattr(toolbox, 'attribute'),
                         n=i_shape)

        logger.stop_timer('TOOLBOX.PY register("individual")')
        logger.start_timer()

        # MUTATION
        toolbox.register("mutate",
                         getattr(tools, 'mutGaussian'),
                         mu=imutmu,
                         sigma=imutsigma,
                         indpb=imutpb)

        logger.stop_timer('TOOLBOX.PY register("mutate")')
        logger.start_timer()

        # POPULATION
        toolbox.register("population", getattr(tools, 'initRepeat'), list,
                         getattr(toolbox, 'individual'))

        logger.stop_timer('TOOLBOX.PY register("population")')
        logger.start_timer()

        # MATING
        toolbox.register("mate", getattr(tools, 'cxTwoPoint'))

        logger.stop_timer('TOOLBOX.PY register("mate")')

    elif model_type == "fpga":

        logger.start_timer()

        # ATTRIBUTE
        toolbox.register("attribute", np.random.choice, [False, True])
        size = np.prod(i_shape)

        logger.stop_timer('TOOLBOX.PY register("attribute")')
        logger.start_timer()

        # MUTATION
        def mutate_individual(ind):
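            # Flip each configuration bit independently with probability imutpb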
            idx = np.argwhere(
                np.random.choice([False, True], size, p=[1 - imutpb, imutpb]))
            ind[idx] = np.invert(ind[idx])
            return ind

        toolbox.register("mutate", mutate_individual)

        logger.stop_timer('TOOLBOX.PY register("mutate")')
        logger.start_timer()

        # POPULATION
        def init_population(ind_class, n):
            pop = np.random.choice([False, True], size=(n, size))
            return [ind_class(ind) for ind in pop]

        toolbox.register("population", init_population,
                         getattr(creator, 'Individual'))

        logger.stop_timer('TOOLBOX.PY register("population")')
        logger.start_timer()

        # MATING
        from varro.fpga.cross_over import cross_over
        toolbox.register("mate", cross_over)

        logger.stop_timer('TOOLBOX.PY register("mate")')

    # SELECTION METHOD
    logger.start_timer()
    if strategy_name == 'nsr-es':
        toolbox.register("select_elite", getattr(
            tools, 'selSPEA2'))  # Use Multi-objective selection method
        toolbox.register("select", getattr(tools, 'selRandom'))
    else:
        toolbox.register("select_elite",
                         getattr(tools, 'selTournament'),
                         tournsize=3)
        toolbox.register("select", getattr(tools, 'selRandom'))

    logger.stop_timer('TOOLBOX.PY register("select")')
    logger.start_timer()

    # EVALUATE
    toolbox.register("evaluate", evaluate)

    logger.stop_timer('TOOLBOX.PY register("evaluate")')

    return toolbox
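
Below is a minimal usage sketch of the toolbox returned above, using the 'fpga' branch. It assumes init_fitness_and_inds() has already registered creator.Individual and that it runs inside the varro environment (the 'fpga' branch imports varro.fpga.cross_over); the evaluation function, i_shape, and imutpb values are placeholders for illustration.

import numpy as np

def dummy_evaluate(pop):                     # placeholder population-level evaluation
    return float(np.mean([ind.sum() for ind in pop]))

toolbox = es_toolbox(strategy_name='sga',
                     i_shape=(8, 8),         # illustrative shape; real CRAM shapes are much larger
                     evaluate=dummy_evaluate,
                     model_type='fpga',
                     imutpb=0.05)

pop = toolbox.population(n=10)               # 10 boolean Individuals of length 8 * 8
mutant = toolbox.mutate(pop[0])              # flips each bit independently with probability imutpb
elites = toolbox.select_elite(pop, k=2)      # tournament selection of 2 elites
avg_score = toolbox.evaluate(pop=pop)        # calls dummy_evaluate on the population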
Example #13
def evolve(strategy, grid_search=False, ckpt_freq=10):
    """Evolves parameters to train a model on a dataset.

    Args:
        strategy (Strategy): The strategy to be used for evolving, Simple Genetic Algorithm (sga) / Novelty Search (ns) / Covariance-Matrix Adaptation (cma-es)
        grid_search (bool): Whether grid search will be in effect
        ckpt_freq (int): Number of generations between saved checkpoints

    Returns:
        pop: Population of the fittest individuals so far
        avg_fitness_scores: A list of the average fitness scores for each generation
        fittest_ind_score: The Best Individual's fitness score

    """

    ########################################################
    # 1. SET UP LOGGER, FOLDERS, AND FILES TO SAVE DATA TO #
    ########################################################

    def process_record(record):
        """Returns the wall-clock time elapsed since the previous call."""
        now = datetime.utcnow()
        try:
            delta = now - process_record.now
        except AttributeError:
            # First call: no previous timestamp has been stored on the function yet
            delta = now - now
        process_record.now = now
        return {'time_since_last': delta}

    logger.log('Starting Evolution ...')
    logger.log('strategy: {}'.format(strategy.name))
    logger.log('problem_type: {}'.format(strategy.problem.name))
    logger.log('cxpb: {}'.format(strategy.cxpb))
    logger.log('mutpb: {}'.format(strategy.mutpb))
    logger.log('popsize: {}'.format(strategy.popsize))
    logger.log('elitesize: {}'.format(strategy.elitesize))
    logger.log('ngen: {}'.format(strategy.ngen))
    logger.log('imutpb: {}'.format(strategy.imutpb))
    logger.log('imutmu: {}'.format(strategy.imutmu))
    logger.log('imutsigma: {}'.format(strategy.imutsigma))
    logger.log('halloffamesize: {}'.format(strategy.halloffamesize))
    logger.log('earlystop: {}'.format(strategy.earlystop))

    # Set additional logging information about experiment
    # if not simple genetic algorithm strategy
    if strategy.name == 'ns-es' or strategy.name == 'nsr-es':
        logger.log('novelty_metric: {}'.format(strategy.novelty_metric))

    ###############################
    # 2. CURRENT POPULATION STATS #
    ###############################
    # Track the Average fitness scores
    avg_fitness_scores = []

    # Evaluate the entire population
    logger.start_timer()
    avg_fitness_score = strategy.toolbox.evaluate(pop=strategy.pop)
    avg_fitness_scores.append(avg_fitness_score)
    logger.stop_timer('EVOLVE.PY strategy.toolbox.evaluate complete')

    #################################
    # 3. EVOLVE THROUGH GENERATIONS #
    #################################
    # Iterate for generations
    start_gen = strategy.curr_gen
    for g in range(start_gen, strategy.ngen):

        logger.start_timer()

        # Select the next generation individuals
        non_alterable, alterable = strategy.generate_offspring()

        logger.stop_timer(
            'EVOLVE.PY Selecting the next generation of individuals')

        logger.start_timer()

        # Mate offspring
        strategy.mate(alterable)

        logger.stop_timer('EVOLVE.PY Mating offspring')
        logger.start_timer()

        # Mutate offspring
        strategy.mutate(alterable)
        logger.stop_timer('EVOLVE.PY Mutating offspring')

        # Recombine Non-alterable offspring with the
        # ones that have been mutated / cross-overed
        offspring = non_alterable + alterable

        # Evaluate the entire population
        strategy.curr_gen = g  # Set the current generation
        avg_fitness_score = strategy.toolbox.evaluate(pop=offspring)
        avg_fitness_scores.append(avg_fitness_score)

        # Save snapshot of population (offspring)
        if g % ckpt_freq == 0 or g == strategy.ngen - 1:
            # Save the checkpoint
            strategy.save_ckpt()

        # Best individual's fitness / novelty score,
        # whichever is the first element of the fitness
        # values tuple because:
        # The hall of fame contains the best individual
        # that ever lived in the population during the
        # evolution. It is lexicographically sorted at all
        # time so that the first element of the hall of fame
        # is the individual that has the best first fitness value
        # ever seen, according to the weights provided to the fitness at creation time.
        if strategy.name == 'sga' or strategy.name == 'nsr-es':
            fittest_ind_score = strategy.halloffame[0].fitness.fitness_score
        elif strategy.name == 'ns-es':
            fittest_ind_score = strategy.halloffame[0].fitness.novelty_score
        elif strategy.name == 'moga':
            fittest_ind_score = strategy.halloffame[0].fitness.fitness_scores[
                0]  # Gets the first objective
        else:
            raise NotImplementedError

        # Log Average score of population
        logger.log(('Generation {:0' + str(len(str(strategy.ngen-1))) + '} | Avg. Fitness Score: {:.5f} | Fittest Individual Score: {:.5f}')\
                        .format(g, avg_fitness_score, fittest_ind_score))

        # Early Stopping if the fittest individual's score is already good enough
        # (accuracy > 95% or MSE < 0.01), or if we are stuck at a local optimum
        # (the average fitness score hasn't changed for the past 10 generations)
        if strategy.earlystop and (strategy.name == 'sga'
                                   or strategy.name == 'nsr-es'):
            if strategy.problem.approx_type == Problem.CLASSIFICATION:
                if round(-fittest_ind_score, 4) > 0.95:
                    logger.log(
                        'Early Stopping activated because Accuracy > 95%.')
                    break
                if len(avg_fitness_scores) > 10 and len(
                        set(avg_fitness_scores[-10:])) == 1:
                    logger.log(
                        'Early Stopping activated because fitness scores have converged.'
                    )
                    break
            else:
                if round(fittest_ind_score, 4) < 0.01:
                    logger.log('Early Stopping activated because MSE < 0.01.')
                    break
                if len(avg_fitness_scores) > 10 and len(
                        set(avg_fitness_scores[-10:])) == 1:
                    logger.log(
                        'Early Stopping activated because fitness scores have converged.'
                    )
                    break
    return strategy.pop, avg_fitness_scores, fittest_ind_score
Example #14
 def load_fpaa(self, config_data):
     """Loads a 2d array of configuration data onto to the FPAA"""
     logger.start_timer() 
     raise NotImplementedError
     logger.stop_timer('INTERFACE.PY load_fpaa')