Code example #1
def plot_sa_optimization(problem, random_seeds, **kwargs):

    sa_curve = []  # curve to plot for SA

    # For each temperature exponential decay rate
    for exp_decay_rate in kwargs['sa_decay_rates']:

        # Define exponential decay schedule
        print('\nSA: exp decay rate = {:.3f}'.format(exp_decay_rate))
        exp_decay = ExpDecay(init_temp=kwargs['sa_init_temp'],
                             exp_const=exp_decay_rate,
                             min_temp=kwargs['sa_min_temp'])

        # Results from multiple random runs for the current decay rate
        sa_objectives = []

        # For multiple random runs
        for random_seed in random_seeds:

            # Run SA and get best state and objective found
            best_state, best_objective, _ = simulated_annealing(
                problem,
                schedule=exp_decay,
                max_attempts=kwargs['sa_max_iters'],
                max_iters=kwargs['sa_max_iters'],
                curve=False,
                random_state=random_seed)
            sa_objectives.append(best_objective)
            print('SA: best_fitness = {:.3f}'.format(best_objective))

        sa_curve.append(sa_objectives)  # append random run to SA curve

    # Plot, set title and labels
    plt.figure()
    utils.plot_helper(x_axis=kwargs['sa_decay_rates'],
                      y_axis=np.array(sa_curve).T,
                      label='SA')
    utils.set_plot_title_labels(
        title='{} - Objective vs. temperature exponential decay rate'.format(
            kwargs['plot_name']),
        x_label='Exponential decay rate',
        y_label=kwargs['plot_ylabel'])

    # Save figure
    plt.savefig(IMAGE_DIR + '{}_objective_vs_temp'.format(kwargs['plot_name']))
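The snippet above relies on module-level imports and helpers that are not shown (mlrose-style optimizers, a local utils module and an IMAGE_DIR constant). Below is a minimal, hypothetical setup and call; the mlrose_hiive import path, the One Max problem and every parameter value are assumptions made only for illustration.

# Hypothetical setup and usage for plot_sa_optimization; import paths,
# helper names and parameter values are assumptions, not the source's own.
import numpy as np
import matplotlib.pyplot as plt
from mlrose_hiive import DiscreteOpt, OneMax, ExpDecay, simulated_annealing

import utils            # assumed local module providing the plotting helpers
IMAGE_DIR = 'images/'   # assumed output directory for the saved figures

# Sweep SA decay rates on a 100-bit One Max problem, averaged over three seeds
problem = DiscreteOpt(length=100, fitness_fn=OneMax(), maximize=True, max_val=2)
plot_sa_optimization(problem, random_seeds=[1, 2, 3],
                     sa_decay_rates=np.arange(0.002, 0.022, 0.002),
                     sa_init_temp=100.0,
                     sa_min_temp=0.001,
                     sa_max_iters=1000,
                     plot_name='One Max',
                     plot_ylabel='Fitness')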
Code example #2
def plot_nn_performances(x_train, y_train, random_seeds, **kwargs):
    """Plot Neural Networks performances on the training set.

        Use different optimizations algorithms (RHC, SA, GA and GD) and compare results on the training set using
        k-fold cross-validation.

        Args:
        x_train (ndarray): training data.
        y_train (ndarray): training labels.
        random_seeds (list or array): random seeds for multiple random runs to use for k-fold cross-validation.
        kwargs (dict): additional arguments to pass for curves plotting:
                  - rhc_max_iters (list or ndarray): RHC list or array of maximum number of iterations to plot vs.
                  - sa_max_iters (list or ndarray): SA list or array of maximum number of iterations to plot vs.
                  - ga_max_iters (list or ndarray): GA list or array of maximum number of iterations to plot vs.
                  - gd_max_iters (list or ndarray): GD list or array of maximum number of iterations to plot vs.
                  - init_temp (float): SA initial temperature.
                  - exp_decay_rate (float): SA temperature exponential decay rate.
                  - min_temp (float): SA minimum temperature.
                  - pop_size (int): GA population size.
                  - mutation_prob (float): GA mutation probability.

        Returns:
        None.
           """

    # Initialize algorithms, corresponding acronyms and max number of iterations
    algorithms = [
        'random_hill_climb', 'simulated_annealing', 'genetic_alg',
        'gradient_descent'
    ]
    acronyms = ['RHC', 'SA', 'GA', 'GD']
    max_iters = [
        'rhc_max_iters', 'sa_max_iters', 'ga_max_iters', 'gd_max_iters'
    ]

    # Define SA exponential decay schedule
    exp_decay = ExpDecay(init_temp=kwargs['init_temp'],
                         exp_const=kwargs['exp_decay_rate'],
                         min_temp=kwargs['min_temp'])

    # Create one figure for training and validation losses and a second for training times
    plt.figure()
    train_val_figure = plt.gcf().number
    plt.figure()
    train_times_figure = plt.gcf().number

    # For each of the optimization algorithms to test the Neural Network with
    for i, algorithm in enumerate(algorithms):
        print('\nAlgorithm = {}'.format(algorithm))

        # Initialize lists of training curves, validation curves and training
        # time curves for the current algorithm
        train_curves, val_curves, train_time_curves = [], [], []

        # For multiple random runs
        for random_seed in random_seeds:

            # Initialize training losses, validation losses and training time lists for current random run
            train_losses, val_losses, train_times = [], [], []

            # Stratified hold-out train/validation split for the current random seed
            x_train_fold, x_val_fold, y_train_fold, y_val_fold = train_test_split(
                x_train,
                y_train,
                test_size=0.2,
                shuffle=True,
                random_state=random_seed,
                stratify=y_train)
            # For each maximum number of iterations to train for
            for max_iter in kwargs[max_iters[i]]:

                # Define Neural Network using current algorithm
                nn = NeuralNetwork(hidden_nodes=[50, 30],
                                   activation='relu',
                                   algorithm=algorithm,
                                   max_iters=int(max_iter),
                                   bias=True,
                                   is_classifier=True,
                                   learning_rate=0.001,
                                   early_stopping=False,
                                   clip_max=1e10,
                                   schedule=exp_decay,
                                   pop_size=kwargs['pop_size'],
                                   mutation_prob=kwargs['mutation_prob'],
                                   max_attempts=int(max_iter),
                                   random_state=random_seed,
                                   curve=False)

                # Train on current training fold and append training time
                start_time = time.time()
                nn.fit(x_train_fold, y_train_fold)
                train_times.append(time.time() - start_time)

                # Compute and append training and validation log losses (note:
                # computed on hard class predictions rather than probabilities)
                train_loss = log_loss(y_train_fold, nn.predict(x_train_fold))
                val_loss = log_loss(y_val_fold, nn.predict(x_val_fold))
                train_losses.append(train_loss)
                val_losses.append(val_loss)
                print('{} - train loss = {:.3f}, val loss = {:.3f}'.format(
                    max_iter, train_loss, val_loss))

            # Append curves for current random seed to corresponding lists of curves
            train_curves.append(train_losses)
            val_curves.append(val_losses)
            train_time_curves.append(train_times)

        # Plot training and validation figure for current algorithm
        plt.figure(train_val_figure)
        utils.plot_helper(x_axis=kwargs[max_iters[i]],
                          y_axis=np.array(train_curves),
                          label='{} train'.format(acronyms[i]))
        utils.plot_helper(x_axis=kwargs[max_iters[i]],
                          y_axis=np.array(val_curves),
                          label='{} val'.format(acronyms[i]))

        # Plot training time figure for current algorithm
        plt.figure(train_times_figure)
        utils.plot_helper(x_axis=kwargs[max_iters[i]],
                          y_axis=np.array(train_time_curves),
                          label=acronyms[i])

    # Set title and labels to training and validation figure
    plt.figure(train_val_figure)
    utils.set_plot_title_labels(title='Neural Network - Loss vs. iterations',
                                x_label='Iterations',
                                y_label='Loss')

    # Save figure
    plt.savefig(IMAGE_DIR + 'nn_objective_vs_iterations')

    # Set title and labels to training time figure
    plt.figure(train_times_figure)
    utils.set_plot_title_labels(title='Neural Network - Time vs. iterations',
                                x_label='Iterations',
                                y_label='Time (seconds)')

    # Save figure
    plt.savefig(IMAGE_DIR + 'nn_time_vs_iterations')
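For reference, a hypothetical call of plot_nn_performances on synthetic data is sketched below; the sklearn dataset helper and every parameter value are illustrative assumptions, not settings taken from the source.

# Hypothetical usage of plot_nn_performances on a synthetic binary
# classification task; all values below are illustrative assumptions.
import numpy as np
from sklearn.datasets import make_classification

x, y = make_classification(n_samples=500, n_features=20, random_state=0)
iters = np.array([10, 50, 100, 200])
plot_nn_performances(x, y,
                     random_seeds=[1, 2, 3],
                     rhc_max_iters=iters,
                     sa_max_iters=iters,
                     ga_max_iters=iters,
                     gd_max_iters=iters,
                     init_temp=100.0,
                     exp_decay_rate=0.1,
                     min_temp=0.001,
                     pop_size=100,
                     mutation_prob=0.2)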
Code example #3
def plot_performances(problem, random_seeds, **kwargs):

    # Initialize lists of objective curves and time curves
    rhc_objectives, sa_objectives, ga_objectives, mimic_objectives = [], [], [], []
    rhc_times, sa_times, ga_times, mimic_times = [], [], [], []

    # Set an exponential decay schedule for SA
    exp_decay = ExpDecay(init_temp=kwargs['sa_init_temp'],
                         exp_const=kwargs['sa_exp_decay_rate'],
                         min_temp=kwargs['sa_min_temp'])

    # For multiple random runs
    for random_seed in random_seeds:

        # Run RHC and get best state and objective found
        _, best_objective, objective_curve = random_hill_climb(
            problem,
            max_attempts=kwargs['rhc_max_iters'],
            max_iters=kwargs['rhc_max_iters'],
            curve=True,
            random_state=random_seed,
            state_fitness_callback=utils.time_callback,
            callback_user_info=[])

        rhc_objectives.append(objective_curve)
        rhc_times.append(utils.times)
        print('\nRHC: best_fitness = {:.3f}'.format(best_objective))

        # Run SA and get best state and objective found
        _, best_objective, objective_curve = simulated_annealing(
            problem,
            schedule=exp_decay,
            max_attempts=kwargs['sa_max_iters'],
            max_iters=kwargs['sa_max_iters'],
            curve=True,
            random_state=random_seed,
            state_fitness_callback=utils.time_callback,
            callback_user_info=[])

        sa_objectives.append(objective_curve)
        sa_times.append(utils.times)
        print('SA: best_fitness = {:.3f}'.format(best_objective))

        # Run GA and get best state and objective found
        _, best_objective, objective_curve = genetic_alg(
            problem,
            pop_size=kwargs['ga_pop_size'],
            pop_breed_percent=1.0 - kwargs['ga_keep_pct'],
            max_attempts=kwargs['ga_max_iters'],
            max_iters=kwargs['ga_max_iters'],
            curve=True,
            random_state=random_seed,
            state_fitness_callback=utils.time_callback,
            callback_user_info=[])

        ga_objectives.append(objective_curve)
        ga_times.append(utils.times)
        print('GA: best_fitness = {:.3f}'.format(best_objective))

        # Run MIMIC and get best state and objective found
        _, best_objective, objective_curve = mimic(
            problem,
            pop_size=kwargs['mimic_pop_size'],
            keep_pct=kwargs['mimic_keep_pct'],
            max_attempts=kwargs['mimic_max_iters'],
            max_iters=kwargs['mimic_max_iters'],
            curve=True,
            random_state=random_seed,
            state_fitness_callback=utils.time_callback,
            callback_user_info=[])

        mimic_objectives.append(objective_curve)
        mimic_times.append(utils.times)
        print('MIMIC: best_fitness = {:.3f}'.format(best_objective))

    # Array of iterations to plot objectives vs. for RHC, SA, GA and MIMIC
    rhc_iterations = np.arange(1, kwargs['rhc_max_iters'] + 1)
    sa_iterations = np.arange(1, kwargs['sa_max_iters'] + 1)
    ga_iterations = np.arange(1, kwargs['ga_max_iters'] + 1)
    mimic_iterations = np.arange(1, kwargs['mimic_max_iters'] + 1)

    # Plot objective curves, set title and labels
    plt.figure()
    utils.plot_helper(x_axis=rhc_iterations,
                      y_axis=np.array(rhc_objectives),
                      label='RHC')
    utils.plot_helper(x_axis=sa_iterations,
                      y_axis=np.array(sa_objectives),
                      label='SA')
    utils.plot_helper(x_axis=ga_iterations,
                      y_axis=np.array(ga_objectives),
                      label='GA')
    utils.plot_helper(x_axis=mimic_iterations,
                      y_axis=np.array(mimic_objectives),
                      label='MIMIC')
    utils.set_plot_title_labels(title='{} - Fitness versus iterations'.format(
        kwargs['plot_name']),
                                x_label='Iterations',
                                y_label=kwargs['plot_ylabel'])

    # Save figure
    plt.savefig(IMAGE_DIR +
                '{}_fitness_vs_iterations'.format(kwargs['plot_name']))

    # Plot times, set title and labels
    plt.figure()
    utils.plot_helper(x_axis=rhc_iterations,
                      y_axis=np.array(rhc_times),
                      label='RHC')
    utils.plot_helper(x_axis=sa_iterations,
                      y_axis=np.array(sa_times),
                      label='SA')
    utils.plot_helper(x_axis=ga_iterations,
                      y_axis=np.array(ga_times),
                      label='GA')
    utils.plot_helper(x_axis=mimic_iterations,
                      y_axis=np.array(mimic_times),
                      label='MIMIC')
    utils.set_plot_title_labels(title='{} - Time versus iterations'.format(
        kwargs['plot_name']),
                                x_label='Iterations',
                                y_label='Time (seconds)')

    # Save figure
    plt.savefig(IMAGE_DIR +
                '{}_time_vs_iterations'.format(kwargs['plot_name']))
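This function leans on utils.time_callback and utils.times to collect per-iteration wall-clock times through mlrose_hiive's state_fitness_callback hook. That helper is not shown in the source, so the sketch below is only one plausible implementation, and the exact callback keyword arguments depend on the mlrose_hiive version.

# Hypothetical utils.time_callback / utils.times pair assumed by the snippet
# above; mlrose_hiive calls the callback with keyword arguments once per
# iteration and keeps iterating while the callback returns True.
import time

times = []          # read as utils.times by the caller after each run
_start_time = None


def time_callback(iteration=None, **kwargs):
    """Record cumulative elapsed time once per iteration of the current run."""
    global times, _start_time
    if not iteration:                       # iteration 0 (or None): a new run starts
        times = []                          # rebind so lists stored by earlier callers survive
        _start_time = time.time()
    else:
        times.append(time.time() - _start_time)
    return True                             # returning False would stop the run early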
Code example #4
def plot_ga_mimic_optimization(problem, param_name, random_seeds, **kwargs):

    ga_curve, mimic_curve = [], []  # curves to plot for GA and MIMIC

    # Initialize, for GA and MIMIC, the parameter we do not loop over, and the label for plotting
    if param_name == 'keep_pct':
        ga_pop_size = kwargs['ga_pop_size']
        mimic_pop_size = kwargs['mimic_pop_size']
        label = 'Percentage to keep'
    elif param_name == 'pop_size':
        ga_keep_pct = kwargs['ga_keep_pct']
        mimic_keep_pct = kwargs['mimic_keep_pct']
        label = 'Population size'
    else:
        # Raise an exception if an invalid parameter name was passed
        raise ValueError("param_name has to be either 'pop_size' or 'keep_pct'")

    # For each parameter value
    for param in kwargs[param_name + 's']:

        print('\nGA & MIMIC: {} = {:.3f}'.format(param_name, param))
        # Results from multiple random runs for the current parameter value
        ga_objectives, mimic_objectives = [], []

        # Initialize, for GA and MIMIC, the parameter we have to loop through
        if param_name == 'keep_pct':
            ga_keep_pct = param
            mimic_keep_pct = param
        elif param_name == 'pop_size':
            ga_pop_size = int(param)
            mimic_pop_size = int(param)

        # For multiple random runs
        for random_seed in random_seeds:

            # Run GA and get best state and objective found
            best_state, best_objective, _ = genetic_alg(
                problem,
                pop_size=ga_pop_size,  # population size
                pop_breed_percent=1.0 - ga_keep_pct,  # percentage to breed
                max_attempts=kwargs['ga_max_iters'],  # attempts limit equals max_iters
                max_iters=kwargs['ga_max_iters'],
                curve=False,
                random_state=random_seed)
            ga_objectives.append(best_objective)
            print('GA: best_objective = {:.3f}'.format(best_objective))

            # Run MIMIC and get best state and objective found
            best_state, best_objective, _ = mimic(
                problem,
                pop_size=mimic_pop_size,  # population size
                keep_pct=mimic_keep_pct,  # percentage to keep
                max_attempts=kwargs['mimic_max_iters'],  # attempts limit equals max_iters
                max_iters=kwargs['mimic_max_iters'],
                curve=False,
                random_state=random_seed)
            mimic_objectives.append(best_objective)
            print('MIMIC: best_objective = {:.3f}'.format(best_objective))

        # Append random run to GA and MIMIC curves
        ga_curve.append(ga_objectives)
        mimic_curve.append(mimic_objectives)

    # Plot, set title and labels
    plt.figure()
    utils.plot_helper(x_axis=kwargs[param_name + 's'],
                      y_axis=np.array(ga_curve).T,
                      label='GA')
    utils.plot_helper(x_axis=kwargs[param_name + 's'],
                      y_axis=np.array(mimic_curve).T,
                      label='MIMIC')
    utils.set_plot_title_labels(title='{} - Objective vs. {}'.format(
        kwargs['plot_name'], label),
                                x_label=label,
                                y_label=kwargs['plot_ylabel'])

    # Save figure
    plt.savefig(IMAGE_DIR +
                '{}_objective_vs_{}'.format(kwargs['plot_name'], param_name))
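A hypothetical call sweeping the keep percentage for GA and MIMIC is shown below; the Four Peaks problem and all parameter values are assumptions chosen only to illustrate the expected kwargs.

# Hypothetical usage of plot_ga_mimic_optimization; import path, problem and
# parameter values are illustrative assumptions.
import numpy as np
from mlrose_hiive import DiscreteOpt, FourPeaks

problem = DiscreteOpt(length=100, fitness_fn=FourPeaks(t_pct=0.1),
                      maximize=True, max_val=2)
plot_ga_mimic_optimization(problem, 'keep_pct', random_seeds=[1, 2, 3],
                           keep_pcts=np.arange(0.1, 0.6, 0.1),
                           ga_pop_size=300,
                           mimic_pop_size=300,
                           ga_max_iters=100,
                           mimic_max_iters=100,
                           plot_name='Four Peaks',
                           plot_ylabel='Fitness')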
Code example #5
def plot_performances(problem, random_seeds, **kwargs):
    """Plot performances for RHC, SA, GA and MIMIC.

       Args:
        problem (DiscreteOpt): mlrose Discrete Optimization problem to run.
        random_seeds (list): random seeds to use for averaging results over multiple random runs.
        kwargs (dict): additional arguments to pass for curves plotting:
                   - sa_init_temp (float): SA initial temperature.
                   - sa_min_temp (float): SA minimum temperature.
                   - sa_exp_decay_rate (float): SA temperature exponential decay rate.
                   - sa_max_iters (int): SA maximum number of iterations.
                   - rhc_max_iters (int): RHC maximum number of iterations.
                   - ga_pop_size (int): GA population size.
                   - ga_keep_pct (float): GA keep percentage.
                   - ga_max_iters (int): GA maximum number of iterations.
                   - mimic_pop_size (int): MIMIC population size.
                   - mimic_keep_pct (float): MIMIC keep percentage.
                   - mimic_max_iters (int): MIMIC maximum number of iterations.
                   - plot_name (string): name of the plot.
                   - plot_ylabel (string): y axis label.

       Returns:
        None.
       """

    # Initialize lists of objective curves and time curves
    rhc_objectives, sa_objectives, ga_objectives, mimic_objectives = [], [], [], []
    rhc_times, sa_times, ga_times, mimic_times = [], [], [], []

    # Set an exponential decay schedule for SA
    exp_decay = ExpDecay(init_temp=kwargs['sa_init_temp'],
                         exp_const=kwargs['sa_exp_decay_rate'],
                         min_temp=kwargs['sa_min_temp'])

    # For multiple random runs
    for random_seed in random_seeds:

        # Run RHC and get best state and objective found
        _, best_objective, objective_curve = random_hill_climb(
            problem,
            max_attempts=kwargs['rhc_max_iters'],
            max_iters=kwargs['rhc_max_iters'],
            curve=True,
            random_state=random_seed,
            state_fitness_callback=utils.time_callback,
            callback_user_info=[])

        rhc_objectives.append(objective_curve)
        rhc_times.append(utils.times)
        print('\nRHC: best_objective = {:.3f}'.format(best_objective))

        # Run SA and get best state and objective found
        _, best_objective, objective_curve = simulated_annealing(
            problem,
            schedule=exp_decay,
            max_attempts=kwargs['sa_max_iters'],
            max_iters=kwargs['sa_max_iters'],
            curve=True,
            random_state=random_seed,
            state_fitness_callback=utils.time_callback,
            callback_user_info=[])

        sa_objectives.append(objective_curve)
        sa_times.append(utils.times)
        print('SA: best_objective = {:.3f}'.format(best_objective))

        # Run GA and get best state and objective found
        _, best_objective, objective_curve = genetic_alg(
            problem,
            pop_size=kwargs['ga_pop_size'],
            pop_breed_percent=1.0 - kwargs['ga_keep_pct'],
            max_attempts=kwargs['ga_max_iters'],
            max_iters=kwargs['ga_max_iters'],
            curve=True,
            random_state=random_seed,
            state_fitness_callback=utils.time_callback,
            callback_user_info=[])

        ga_objectives.append(objective_curve)
        ga_times.append(utils.times)
        print('GA: best_objective = {:.3f}'.format(best_objective))

        # Run MIMIC and get best state and objective found
        _, best_objective, objective_curve = mimic(
            problem,
            pop_size=kwargs['mimic_pop_size'],
            keep_pct=kwargs['mimic_keep_pct'],
            max_attempts=kwargs['mimic_max_iters'],
            max_iters=kwargs['mimic_max_iters'],
            curve=True,
            random_state=random_seed,
            state_fitness_callback=utils.time_callback,
            callback_user_info=[])

        mimic_objectives.append(objective_curve)
        mimic_times.append(utils.times)
        print('MIMIC: best_objective = {:.3f}'.format(best_objective))

    # Array of iterations to plot objectives vs. for RHC, SA, GA and MIMIC
    rhc_iterations = np.arange(1, kwargs['rhc_max_iters'] + 1)
    sa_iterations = np.arange(1, kwargs['sa_max_iters'] + 1)
    ga_iterations = np.arange(1, kwargs['ga_max_iters'] + 1)
    mimic_iterations = np.arange(1, kwargs['mimic_max_iters'] + 1)

    # Plot objective curves, set title and labels
    plt.figure()
    utils.plot_helper(x_axis=rhc_iterations,
                      y_axis=np.array(rhc_objectives),
                      label='RHC')
    utils.plot_helper(x_axis=sa_iterations,
                      y_axis=np.array(sa_objectives),
                      label='SA')
    utils.plot_helper(x_axis=ga_iterations,
                      y_axis=np.array(ga_objectives),
                      label='GA')
    utils.plot_helper(x_axis=mimic_iterations,
                      y_axis=np.array(mimic_objectives),
                      label='MIMIC')
    utils.set_plot_title_labels(
        title='{} - Objective versus iterations'.format(kwargs['plot_name']),
        x_label='Iterations',
        y_label=kwargs['plot_ylabel'])

    # Save figure
    plt.savefig(IMAGE_DIR +
                '{}_objective_vs_iterations'.format(kwargs['plot_name']))

    # Plot times, set title and labels
    plt.figure()
    utils.plot_helper(x_axis=rhc_iterations,
                      y_axis=np.array(rhc_times),
                      label='RHC')
    utils.plot_helper(x_axis=sa_iterations,
                      y_axis=np.array(sa_times),
                      label='SA')
    utils.plot_helper(x_axis=ga_iterations,
                      y_axis=np.array(ga_times),
                      label='GA')
    utils.plot_helper(x_axis=mimic_iterations,
                      y_axis=np.array(mimic_times),
                      label='MIMIC')
    utils.set_plot_title_labels(title='{} - Time versus iterations'.format(
        kwargs['plot_name']),
                                x_label='Iterations',
                                y_label='Time (seconds)')

    # Save figure
    plt.savefig(IMAGE_DIR +
                '{}_time_vs_iterations'.format(kwargs['plot_name']))
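Since the function reads its configuration entirely from kwargs, an example dictionary with the expected keys is given below; every value is an illustrative assumption, not a tuned setting from the source.

# Illustrative kwargs for plot_performances; the problem and all values are
# assumptions that only demonstrate the expected keys and types.
from mlrose_hiive import DiscreteOpt, OneMax

problem = DiscreteOpt(length=100, fitness_fn=OneMax(), maximize=True, max_val=2)
kwargs = {'rhc_max_iters': 500,
          'sa_init_temp': 100.0, 'sa_exp_decay_rate': 0.02, 'sa_min_temp': 0.001,
          'sa_max_iters': 500,
          'ga_pop_size': 300, 'ga_keep_pct': 0.2, 'ga_max_iters': 100,
          'mimic_pop_size': 300, 'mimic_keep_pct': 0.2, 'mimic_max_iters': 100,
          'plot_name': 'One Max', 'plot_ylabel': 'Fitness'}
plot_performances(problem, random_seeds=[1, 2, 3], **kwargs)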
Code example #6
def plot_ga_mimic_optimization(problem, param_name, random_seeds, **kwargs):
    """Plot objective function vs parameter for GA and MIMIC.

        Args:
         problem (DiscreteOpt): mlrose Discrete Optimization problem to run.
         param_name (string): GA and MIMIC param to plot objective vs, 'pop_size' or 'keep_pct'.
         random_seeds (list): random seeds to use for averaging results over multiple random runs.
         kwargs (dict): additional arguments to pass for curves plotting:
                    - pop_sizes (list or ndarray): list or array of population sizes to plot the objective against.
                    - keep_pcts (list or ndarray): list or array of keep percentages to plot the objective against.
                    - ga_pop_size (int): GA population size.
                    - ga_keep_pct (float): GA keep percentage.
                    - ga_max_iters (int): GA maximum number of iterations.
                    - mimic_pop_size (int): MIMIC population size.
                    - mimic_keep_pct (float): MIMIC keep percentage.
                    - mimic_max_iters (int): MIMIC maximum number of iterations.
                    - plot_name (string): name of the plot.
                    - plot_ylabel (string): y axis label.

        Returns:
         None.
        """

    ga_curve, mimic_curve = [], []  # curves to plot for GA and MIMIC

    # Initialize, for GA and MIMIC, the parameter we do not loop over, and the label for plotting
    if param_name == 'keep_pct':
        ga_pop_size = kwargs['ga_pop_size']
        mimic_pop_size = kwargs['mimic_pop_size']
        label = 'Percentage to keep'
    elif param_name == 'pop_size':
        ga_keep_pct = kwargs['ga_keep_pct']
        mimic_keep_pct = kwargs['mimic_keep_pct']
        label = 'Population size'
    else:
        # Raise an exception if an invalid parameter name was passed
        raise ValueError("param_name has to be either 'pop_size' or 'keep_pct'")

    # For each parameter value
    for param in kwargs[param_name + 's']:

        print('\nGA & MIMIC: {} = {:.3f}'.format(param_name, param))
        # Results from multiple random runs for the current parameter value
        ga_objectives, mimic_objectives = [], []

        # Initialize, for GA and MIMIC, the parameter we have to loop through
        if param_name == 'keep_pct':
            ga_keep_pct = param
            mimic_keep_pct = param
        elif param_name == 'pop_size':
            ga_pop_size = int(param)
            mimic_pop_size = int(param)

        # For multiple random runs
        for random_seed in random_seeds:

            # Run GA and get best state and objective found
            best_state, best_objective, _ = genetic_alg(
                problem,
                pop_size=ga_pop_size,  # population size
                pop_breed_percent=1.0 - ga_keep_pct,  # percentage to breed
                max_attempts=kwargs['ga_max_iters'],  # attempts limit equals max_iters
                max_iters=kwargs['ga_max_iters'],
                curve=False,
                random_state=random_seed)
            ga_objectives.append(best_objective)
            print('GA: best_objective = {:.3f}'.format(best_objective))

            # Run MIMIC and get best state and objective found
            best_state, best_objective, _ = mimic(
                problem,
                pop_size=mimic_pop_size,  # population size
                keep_pct=mimic_keep_pct,  # percentage to keep
                max_attempts=kwargs['mimic_max_iters'],  # attempts limit equals max_iters
                max_iters=kwargs['mimic_max_iters'],
                curve=False,
                random_state=random_seed)
            mimic_objectives.append(best_objective)
            print('MIMIC: best_objective = {:.3f}'.format(best_objective))

        # Append random run to GA and MIMIC curves
        ga_curve.append(ga_objectives)
        mimic_curve.append(mimic_objectives)

    # Plot, set title and labels
    plt.figure()
    utils.plot_helper(x_axis=kwargs[param_name + 's'],
                      y_axis=np.array(ga_curve).T,
                      label='GA')
    utils.plot_helper(x_axis=kwargs[param_name + 's'],
                      y_axis=np.array(mimic_curve).T,
                      label='MIMIC')
    utils.set_plot_title_labels(title='{} - Objective vs. {}'.format(
        kwargs['plot_name'], label),
                                x_label=label,
                                y_label=kwargs['plot_ylabel'])

    # Save figure
    plt.savefig(IMAGE_DIR +
                '{}_objective_vs_{}'.format(kwargs['plot_name'], param_name))
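The plotting itself is delegated to utils.plot_helper and utils.set_plot_title_labels, which are not part of the source. A minimal sketch of what such helpers might look like is given below, assuming each row of y_axis is one random run and plotting the mean across runs with a one-standard-deviation band.

# Hypothetical utils.plot_helper / utils.set_plot_title_labels assumed by the
# snippets above; each row of y_axis is treated as one random run.
import numpy as np
import matplotlib.pyplot as plt


def plot_helper(x_axis, y_axis, label):
    mean = np.mean(y_axis, axis=0)                 # average over random runs
    std = np.std(y_axis, axis=0)
    plt.plot(x_axis, mean, label=label)
    plt.fill_between(x_axis, mean - std, mean + std, alpha=0.2)


def set_plot_title_labels(title, x_label, y_label):
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.legend(loc='best')
    plt.grid(True)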
Code example #7
def plot_sa_optimization(problem, random_seeds, **kwargs):
    """Plot objective function vs temperature for SA.

        Args:
         problem (DiscreteOpt): mlrose Discrete Optimization problem to run.
         random_seeds (list): random seeds to use for averaging results over multiple random runs.
         kwargs (dict): additional arguments to pass for curves plotting:
                    - sa_decay_rates (list or ndarray): list or array of exponential decay rates to plot the objective against.
                    - sa_init_temp (float): SA initial temperature.
                    - sa_min_temp (float): SA minimum temperature.
                    - sa_max_iters (int): SA maximum number of iterations.
                    - plot_name (string): name of the plot.
                    - plot_ylabel (string): y axis label.

        Returns:
         None.
        """

    sa_curve = []  # curve to plot for SA

    # For each temperature exponential decay rate
    for exp_decay_rate in kwargs['sa_decay_rates']:

        # Define exponential decay schedule
        print('\nSA: exp decay rate = {:.3f}'.format(exp_decay_rate))
        exp_decay = ExpDecay(init_temp=kwargs['sa_init_temp'],
                             exp_const=exp_decay_rate,
                             min_temp=kwargs['sa_min_temp'])

        # Results from multiple random runs for the current decay rate
        sa_objectives = []

        # For multiple random runs
        for random_seed in random_seeds:

            # Run SA and get best state and objective found
            best_state, best_objective, _ = simulated_annealing(
                problem,
                schedule=exp_decay,
                max_attempts=kwargs['sa_max_iters'],
                max_iters=kwargs['sa_max_iters'],
                curve=False,
                random_state=random_seed)
            sa_objectives.append(best_objective)
            print('SA: best_fitness = {:.3f}'.format(best_objective))

        sa_curve.append(sa_objectives)  # append random run to SA curve

    # Plot, set title and labels
    plt.figure()
    utils.plot_helper(x_axis=kwargs['sa_decay_rates'],
                      y_axis=np.array(sa_curve).T,
                      label='SA')
    utils.set_plot_title_labels(
        title='{} - Objective vs. temperature exponential decay rate'.format(
            kwargs['plot_name']),
        x_label='Exponential decay rate',
        y_label=kwargs['plot_ylabel'])

    # Save figure
    plt.savefig(IMAGE_DIR + '{}_objective_vs_temp'.format(kwargs['plot_name']))